From 91cab1e41661fbf61b38053b4b5fe8ce2edae376 Mon Sep 17 00:00:00 2001 From: Philippe El Asmar <53088140+philasmar@users.noreply.github.com> Date: Tue, 14 Oct 2025 13:11:23 -0400 Subject: [PATCH 01/56] Increase multipart upload default part size to 8MB (#4032) --- .../252dad9f-d2a9-4d49-bff8-000924f0adc3.json | 11 +++++++++++ .../Internal/MultipartUploadCommand.cs | 19 +++++++------------ .../Services/S3/Custom/Util/S3Constants.cs | 1 + .../IntegrationTests/TransferUtilityTests.cs | 19 +++++++++++++++++++ 4 files changed, 38 insertions(+), 12 deletions(-) create mode 100644 generator/.DevConfigs/252dad9f-d2a9-4d49-bff8-000924f0adc3.json diff --git a/generator/.DevConfigs/252dad9f-d2a9-4d49-bff8-000924f0adc3.json b/generator/.DevConfigs/252dad9f-d2a9-4d49-bff8-000924f0adc3.json new file mode 100644 index 000000000000..402cdd2d7b1c --- /dev/null +++ b/generator/.DevConfigs/252dad9f-d2a9-4d49-bff8-000924f0adc3.json @@ -0,0 +1,11 @@ +{ + "services": [ + { + "serviceName": "S3", + "type": "minor", + "changeLogMessages": [ + "Increasing the default part size for S3 multipart upload from 5MB to 8MB when no part size is specified. This will reduce the number of API calls for multipart uploads." + ] + } + ] +} \ No newline at end of file diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartUploadCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartUploadCommand.cs index e31184e6353f..c24da9add8e0 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartUploadCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartUploadCommand.cs @@ -81,11 +81,12 @@ internal MultipartUploadCommand(IAmazonS3 s3Client, TransferUtilityConfig config this._s3Client = s3Client; this._fileTransporterRequest = fileTransporterRequest; this._contentLength = this._fileTransporterRequest.ContentLength; + + long targetPartSize = fileTransporterRequest.IsSetPartSize() + ? 
fileTransporterRequest.PartSize + : S3Constants.DefaultPartSize; - if (fileTransporterRequest.IsSetPartSize()) - this._partSize = fileTransporterRequest.PartSize; - else - this._partSize = calculatePartSize(this._contentLength); + this._partSize = calculatePartSize(this._contentLength, targetPartSize); if (fileTransporterRequest.InputStream != null) { @@ -98,15 +99,9 @@ internal MultipartUploadCommand(IAmazonS3 s3Client, TransferUtilityConfig config Logger.DebugFormat("Upload part size {0}.", this._partSize); } - private static long calculatePartSize(long fileSize) + private static long calculatePartSize(long contentLength, long targetPartSize) { - double partSize = Math.Ceiling((double)fileSize / S3Constants.MaxNumberOfParts); - if (partSize < S3Constants.MinPartSize) - { - partSize = S3Constants.MinPartSize; - } - - return (long)partSize; + return Math.Max(targetPartSize, contentLength / S3Constants.MaxNumberOfParts); } private string determineContentType() diff --git a/sdk/src/Services/S3/Custom/Util/S3Constants.cs b/sdk/src/Services/S3/Custom/Util/S3Constants.cs index becbf30231b7..f85ad35e00e5 100644 --- a/sdk/src/Services/S3/Custom/Util/S3Constants.cs +++ b/sdk/src/Services/S3/Custom/Util/S3Constants.cs @@ -33,6 +33,7 @@ internal static class S3Constants internal const int PutObjectDefaultTimeout = 20 * 60 * 1000; internal static readonly long MinPartSize = 5 * (long)Math.Pow(2, 20); + internal static readonly long DefaultPartSize = 8 * (long)Math.Pow(2, 20); internal const int MaxNumberOfParts = 10000; internal const int DefaultBufferSize = 8192; diff --git a/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs b/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs index abc767a0bdd8..cce278d328ae 100644 --- a/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs +++ b/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs @@ -663,6 +663,25 @@ public void MultipartGetNumberTest() } } + [TestMethod] + [TestCategory("S3")] + public void MultipartValidatePartSize8MbTest() + { + string key = "MultipartValidatePartSizeTest"; + + Upload(key, 20 * MEG_SIZE, null, Client); + + var objectMetadataResponse = Client.GetObjectMetadata(new GetObjectMetadataRequest + { + BucketName = bucketName, + Key = key, + PartNumber = 1, + }); + + Assert.AreEqual(3, objectMetadataResponse.PartsCount); + Assert.AreEqual(8 * MEG_SIZE, objectMetadataResponse.ContentLength); + } + void Upload(string fileName, long size, TransferProgressValidator progressValidator, AmazonS3Client client = null) { From 5a38b60704d2be43b032ba7e6d9d179043f7d7c0 Mon Sep 17 00:00:00 2001 From: Garrett Beatty Date: Mon, 20 Oct 2025 10:03:41 -0400 Subject: [PATCH 02/56] Added PutObjectResponse to TransferUtilityUploadResponse mapping (#4045) --- .../433a9a6d-b8ea-4676-b763-70711e8288e4.json | 11 + .../Generators/SourceFiles/AssemblyInfo.cs | 35 +- .../Generators/SourceFiles/AssemblyInfo.tt | 4 + generator/ServiceModels/_manifest.json | 3 +- .../Transfer/Internal/ResponseMapper.cs | 104 ++++ .../Transfer/TransferUtilityUploadResponse.cs | 470 ++++++++++++++++++ .../Services/S3/Properties/AssemblyInfo.cs | 2 + .../AWSSDK.UnitTests.S3.NetFramework.csproj | 3 + .../Custom/EmbeddedResource/mapping.json | 291 +++++++++++ .../EmbeddedResource/property-aliases.json | 10 + .../UnitTests/Custom/ResponseMapperTests.cs | 458 +++++++++++++++++ .../AWSSDK.UnitTests.NetFramework.csproj | 1 + 12 files changed, 1381 insertions(+), 11 deletions(-) create mode 100644 
generator/.DevConfigs/433a9a6d-b8ea-4676-b763-70711e8288e4.json create mode 100644 sdk/src/Services/S3/Custom/Transfer/Internal/ResponseMapper.cs create mode 100644 sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadResponse.cs create mode 100644 sdk/test/Services/S3/UnitTests/Custom/EmbeddedResource/mapping.json create mode 100644 sdk/test/Services/S3/UnitTests/Custom/EmbeddedResource/property-aliases.json create mode 100644 sdk/test/Services/S3/UnitTests/Custom/ResponseMapperTests.cs diff --git a/generator/.DevConfigs/433a9a6d-b8ea-4676-b763-70711e8288e4.json b/generator/.DevConfigs/433a9a6d-b8ea-4676-b763-70711e8288e4.json new file mode 100644 index 000000000000..166d9469d903 --- /dev/null +++ b/generator/.DevConfigs/433a9a6d-b8ea-4676-b763-70711e8288e4.json @@ -0,0 +1,11 @@ +{ + "services": [ + { + "serviceName": "S3", + "type": "patch", + "changeLogMessages": [ + "Added PutObjectResponse to TransferUtilityUploadResponse mapping" + ] + } + ] +} diff --git a/generator/ServiceClientGeneratorLib/Generators/SourceFiles/AssemblyInfo.cs b/generator/ServiceClientGeneratorLib/Generators/SourceFiles/AssemblyInfo.cs index ecf1a83dd482..156c2b897efe 100644 --- a/generator/ServiceClientGeneratorLib/Generators/SourceFiles/AssemblyInfo.cs +++ b/generator/ServiceClientGeneratorLib/Generators/SourceFiles/AssemblyInfo.cs @@ -15,7 +15,7 @@ namespace ServiceClientGenerator.Generators.SourceFiles /// Class to produce the template output /// - #line 1 "C:\codebase\v4\aws-sdk-net-v4\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" + #line 1 "C:\dev\repos\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" [global::System.CodeDom.Compiler.GeneratedCodeAttribute("Microsoft.VisualStudio.TextTemplating", "17.0.0.0")] public partial class AssemblyInfo : BaseGenerator { @@ -36,35 +36,35 @@ public override string TransformText() // associated with an assembly. 
[assembly: AssemblyTitle("""); - #line 12 "C:\codebase\v4\aws-sdk-net-v4\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" + #line 12 "C:\dev\repos\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" this.Write(this.ToStringHelper.ToStringWithCulture(this.Config.AssemblyTitle)); #line default #line hidden this.Write("\")]\r\n#if BCL\r\n[assembly: AssemblyDescription(\""); - #line 14 "C:\codebase\v4\aws-sdk-net-v4\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" + #line 14 "C:\dev\repos\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" this.Write(this.ToStringHelper.ToStringWithCulture(this.Config.AssemblyDescription(versionIdentifier: "4.7.2"))); #line default #line hidden this.Write("\")]\r\n#elif NETSTANDARD20\r\n[assembly: AssemblyDescription(\""); - #line 16 "C:\codebase\v4\aws-sdk-net-v4\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" + #line 16 "C:\dev\repos\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" this.Write(this.ToStringHelper.ToStringWithCulture(this.Config.AssemblyDescription(versionIdentifier: "NetStandard 2.0"))); #line default #line hidden this.Write("\")]\r\n#elif NETCOREAPP3_1\r\n[assembly: AssemblyDescription(\""); - #line 18 "C:\codebase\v4\aws-sdk-net-v4\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" + #line 18 "C:\dev\repos\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" this.Write(this.ToStringHelper.ToStringWithCulture(this.Config.AssemblyDescription(versionIdentifier: ".NET Core 3.1"))); #line default #line hidden this.Write("\")]\r\n#elif NET8_0\r\n[assembly: AssemblyDescription(\""); - #line 20 "C:\codebase\v4\aws-sdk-net-v4\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" + #line 20 "C:\dev\repos\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" this.Write(this.ToStringHelper.ToStringWithCulture(this.Config.AssemblyDescription(versionIdentifier: ".NET 8.0"))); #line default @@ -72,7 +72,7 @@ public override string TransformText() this.Write("\")]\r\n#else\r\n#error Unknown platform constant - unable to set correct AssemblyDesc" + "ription\r\n#endif\r\n\r\n"); - #line 25 "C:\codebase\v4\aws-sdk-net-v4\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" + #line 25 "C:\dev\repos\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" if (this.Config.AssemblyTitle=="AWSSDK.DynamoDBv2") { #line default @@ -81,7 +81,22 @@ public override string TransformText() [assembly: InternalsVisibleTo(""AWSSDK.UnitTests.NetFramework, PublicKey=0024000004800000940000000602000000240000525341310004000001000100db5f59f098d27276c7833875a6263a3cc74ab17ba9a9df0b52aedbe7252745db7274d5271fd79c1f08f668ecfa8eaab5626fa76adc811d3c8fc55859b0d09d3bc0a84eecd0ba891f2b8a2fc55141cdcc37c2053d53491e650a479967c3622762977900eddbf1252ed08a2413f00a28f3a0752a81203f03ccb7f684db373518b4"")] "); - #line 28 "C:\codebase\v4\aws-sdk-net-v4\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" + #line 28 "C:\dev\repos\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" + } + + #line default + #line hidden + + #line 29 "C:\dev\repos\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" + if (this.Config.AssemblyTitle=="AWSSDK.S3") { + + #line 
default + #line hidden + this.Write(@"[assembly: InternalsVisibleTo(""AWSSDK.UnitTests.S3.NetFramework, PublicKey=0024000004800000940000000602000000240000525341310004000001000100db5f59f098d27276c7833875a6263a3cc74ab17ba9a9df0b52aedbe7252745db7274d5271fd79c1f08f668ecfa8eaab5626fa76adc811d3c8fc55859b0d09d3bc0a84eecd0ba891f2b8a2fc55141cdcc37c2053d53491e650a479967c3622762977900eddbf1252ed08a2413f00a28f3a0752a81203f03ccb7f684db373518b4"")] +[assembly: InternalsVisibleTo(""AWSSDK.UnitTests.NetFramework, PublicKey=0024000004800000940000000602000000240000525341310004000001000100db5f59f098d27276c7833875a6263a3cc74ab17ba9a9df0b52aedbe7252745db7274d5271fd79c1f08f668ecfa8eaab5626fa76adc811d3c8fc55859b0d09d3bc0a84eecd0ba891f2b8a2fc55141cdcc37c2053d53491e650a479967c3622762977900eddbf1252ed08a2413f00a28f3a0752a81203f03ccb7f684db373518b4"")] +"); + + #line 32 "C:\dev\repos\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" } #line default @@ -110,14 +125,14 @@ public override string TransformText() // [assembly: AssemblyVersion(""1.0.*"")] [assembly: AssemblyVersion("""); - #line 51 "C:\codebase\v4\aws-sdk-net-v4\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" + #line 55 "C:\dev\repos\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" this.Write(this.ToStringHelper.ToStringWithCulture(this.Config.ServiceVersion)); #line default #line hidden this.Write("\")]\r\n[assembly: AssemblyFileVersion(\""); - #line 52 "C:\codebase\v4\aws-sdk-net-v4\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" + #line 56 "C:\dev\repos\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" this.Write(this.ToStringHelper.ToStringWithCulture(this.Config.ServiceFileVersion)); #line default diff --git a/generator/ServiceClientGeneratorLib/Generators/SourceFiles/AssemblyInfo.tt b/generator/ServiceClientGeneratorLib/Generators/SourceFiles/AssemblyInfo.tt index 4a8b9fad751a..ab2cf5d21a23 100644 --- a/generator/ServiceClientGeneratorLib/Generators/SourceFiles/AssemblyInfo.tt +++ b/generator/ServiceClientGeneratorLib/Generators/SourceFiles/AssemblyInfo.tt @@ -26,6 +26,10 @@ using System.Runtime.CompilerServices; [assembly: InternalsVisibleTo("AWSSDK.UnitTests.DynamoDBv2.NetFramework, PublicKey=0024000004800000940000000602000000240000525341310004000001000100db5f59f098d27276c7833875a6263a3cc74ab17ba9a9df0b52aedbe7252745db7274d5271fd79c1f08f668ecfa8eaab5626fa76adc811d3c8fc55859b0d09d3bc0a84eecd0ba891f2b8a2fc55141cdcc37c2053d53491e650a479967c3622762977900eddbf1252ed08a2413f00a28f3a0752a81203f03ccb7f684db373518b4")] [assembly: InternalsVisibleTo("AWSSDK.UnitTests.NetFramework, PublicKey=0024000004800000940000000602000000240000525341310004000001000100db5f59f098d27276c7833875a6263a3cc74ab17ba9a9df0b52aedbe7252745db7274d5271fd79c1f08f668ecfa8eaab5626fa76adc811d3c8fc55859b0d09d3bc0a84eecd0ba891f2b8a2fc55141cdcc37c2053d53491e650a479967c3622762977900eddbf1252ed08a2413f00a28f3a0752a81203f03ccb7f684db373518b4")] <# } #> +<# if (this.Config.AssemblyTitle=="AWSSDK.S3") { #> +[assembly: InternalsVisibleTo("AWSSDK.UnitTests.S3.NetFramework, PublicKey=0024000004800000940000000602000000240000525341310004000001000100db5f59f098d27276c7833875a6263a3cc74ab17ba9a9df0b52aedbe7252745db7274d5271fd79c1f08f668ecfa8eaab5626fa76adc811d3c8fc55859b0d09d3bc0a84eecd0ba891f2b8a2fc55141cdcc37c2053d53491e650a479967c3622762977900eddbf1252ed08a2413f00a28f3a0752a81203f03ccb7f684db373518b4")] +[assembly: 
InternalsVisibleTo("AWSSDK.UnitTests.NetFramework, PublicKey=0024000004800000940000000602000000240000525341310004000001000100db5f59f098d27276c7833875a6263a3cc74ab17ba9a9df0b52aedbe7252745db7274d5271fd79c1f08f668ecfa8eaab5626fa76adc811d3c8fc55859b0d09d3bc0a84eecd0ba891f2b8a2fc55141cdcc37c2053d53491e650a479967c3622762977900eddbf1252ed08a2413f00a28f3a0752a81203f03ccb7f684db373518b4")] +<# } #> [assembly: AssemblyConfiguration("")] [assembly: AssemblyProduct("Amazon Web Services SDK for .NET")] [assembly: AssemblyCompany("Amazon.com, Inc")] diff --git a/generator/ServiceModels/_manifest.json b/generator/ServiceModels/_manifest.json index 24252ee5bfd6..5d712b04c8c1 100644 --- a/generator/ServiceModels/_manifest.json +++ b/generator/ServiceModels/_manifest.json @@ -60,7 +60,8 @@ "Custom\\Runtime\\TestResponses\\*.txt", "Custom\\Runtime\\EventStreams\\test_vectors\\*", "Custom\\Runtime\\TestEndpoints\\*.json", - "Custom\\TestTools\\ComparerTest.json" + "Custom\\TestTools\\ComparerTest.json", + "..\\Services\\S3\\UnitTests\\Custom\\EmbeddedResource\\*" ], "packageReferences": [ { diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/ResponseMapper.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/ResponseMapper.cs new file mode 100644 index 000000000000..d130aee20bff --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/ResponseMapper.cs @@ -0,0 +1,104 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ + +using Amazon.S3.Model; + +namespace Amazon.S3.Transfer.Internal +{ + /// + /// Utility class for mapping S3 response objects to TransferUtilityUploadResponse. + /// Maps fields based on the mapping.json configuration used by the Transfer Utility. + /// + internal static class ResponseMapper + { + /// + /// Maps a PutObjectResponse to TransferUtilityUploadResponse. + /// Uses the field mappings defined in mapping.json "Conversion" -> "PutObjectResponse" -> "UploadResponse". 
+ /// + /// The PutObjectResponse to map from + /// A new TransferUtilityUploadResponse with mapped fields + internal static TransferUtilityUploadResponse MapPutObjectResponse(PutObjectResponse source) + { + if (source == null) + return null; + + var response = new TransferUtilityUploadResponse(); + + // Map all fields as defined in mapping.json "Conversion" -> "PutObjectResponse" -> "UploadResponse" + if (source.IsSetBucketKeyEnabled()) + response.BucketKeyEnabled = source.BucketKeyEnabled.GetValueOrDefault(); + + if (source.IsSetChecksumCRC32()) + response.ChecksumCRC32 = source.ChecksumCRC32; + + if (source.IsSetChecksumCRC32C()) + response.ChecksumCRC32C = source.ChecksumCRC32C; + + if (source.IsSetChecksumCRC64NVME()) + response.ChecksumCRC64NVME = source.ChecksumCRC64NVME; + + if (source.IsSetChecksumSHA1()) + response.ChecksumSHA1 = source.ChecksumSHA1; + + if (source.IsSetChecksumSHA256()) + response.ChecksumSHA256 = source.ChecksumSHA256; + + if (source.IsSetChecksumType()) + response.ChecksumType = source.ChecksumType; + + if (source.IsSetETag()) + response.ETag = source.ETag; + + if (source.Expiration != null) + response.Expiration = source.Expiration; + + if (source.IsSetRequestCharged()) + response.RequestCharged = source.RequestCharged; + + if (source.ServerSideEncryptionCustomerMethod != null) + response.ServerSideEncryptionCustomerMethod = source.ServerSideEncryptionCustomerMethod; + + if (source.ServerSideEncryptionCustomerProvidedKeyMD5 != null) + response.ServerSideEncryptionCustomerProvidedKeyMD5 = source.ServerSideEncryptionCustomerProvidedKeyMD5; + + if (source.ServerSideEncryptionKeyManagementServiceEncryptionContext != null) + response.ServerSideEncryptionKeyManagementServiceEncryptionContext = source.ServerSideEncryptionKeyManagementServiceEncryptionContext; + + if (source.IsSetServerSideEncryptionKeyManagementServiceKeyId()) + response.ServerSideEncryptionKeyManagementServiceKeyId = source.ServerSideEncryptionKeyManagementServiceKeyId; + + if (source.ServerSideEncryptionMethod != null) + response.ServerSideEncryptionMethod = source.ServerSideEncryptionMethod; + + if (source.IsSetVersionId()) + response.VersionId = source.VersionId; + + // Copy response metadata + response.ResponseMetadata = source.ResponseMetadata; + response.ContentLength = source.ContentLength; + response.HttpStatusCode = source.HttpStatusCode; + + return response; + } + + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadResponse.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadResponse.cs new file mode 100644 index 000000000000..3fcc20294a0a --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadResponse.cs @@ -0,0 +1,470 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ + +using System; +using Amazon.Runtime; +using Amazon.S3.Model; +using Amazon.Runtime.Internal; + +namespace Amazon.S3.Transfer +{ + /// + /// Response object for Transfer Utility upload operations. + /// Contains unified response fields from both simple uploads (PutObjectResponse) + /// and multipart uploads (CompleteMultipartUploadResponse). + /// + public class TransferUtilityUploadResponse : AmazonWebServiceResponse + { + private bool? _bucketKeyEnabled; + private string _checksumCRC32; + private string _checksumCRC32C; + private string _checksumCRC64NVME; + private string _checksumSHA1; + private string _checksumSHA256; + private ChecksumType _checksumType; + private string _etag; + private Expiration _expiration; + private RequestCharged _requestCharged; + private ServerSideEncryptionCustomerMethod _serverSideEncryptionCustomerMethod; + private string _sseCustomerKeyMD5; + private string _sseKmsEncryptionContext; + private string _sseKmsKeyId; + private ServerSideEncryptionMethod _serverSideEncryption; + private string _versionId; + + /// + /// Gets and sets the property BucketKeyEnabled. + /// + /// Indicates whether the uploaded object uses an S3 Bucket Key for server-side encryption + /// with Key Management Service (KMS) keys (SSE-KMS). + /// + /// + public bool? BucketKeyEnabled + { + get { return this._bucketKeyEnabled; } + set { this._bucketKeyEnabled = value; } + } + + /// + /// Checks if BucketKeyEnabled property is set. + /// + /// true if BucketKeyEnabled property is set. + internal bool IsSetBucketKeyEnabled() + { + return this._bucketKeyEnabled.HasValue; + } + + /// + /// Gets and sets the property ChecksumCRC32. + /// + /// The Base64 encoded, 32-bit CRC-32 checksum of the object. This checksum is only present + /// if the checksum was uploaded with the object. When you use an API operation on an object that + /// was uploaded using multipart uploads, this value may not be a direct checksum value + /// of the full object. Instead, it's a calculation based on the checksum values of each + /// individual part. For more information about how checksums are calculated with multipart + /// uploads, see + /// Checking object integrity in the Amazon S3 User Guide. + /// + /// + public string ChecksumCRC32 + { + get { return this._checksumCRC32; } + set { this._checksumCRC32 = value; } + } + + /// + /// Checks if ChecksumCRC32 property is set. + /// + /// true if ChecksumCRC32 property is set. + internal bool IsSetChecksumCRC32() + { + return !string.IsNullOrEmpty(this._checksumCRC32); + } + + /// + /// Gets and sets the property ChecksumCRC32C. + /// + /// The Base64 encoded, 32-bit CRC-32C checksum of the object. This checksum is only present + /// if the checksum was uploaded with the object. When you use an API operation on an object that + /// was uploaded using multipart uploads, this value may not be a direct checksum value + /// of the full object. Instead, it's a calculation based on the checksum values of each + /// individual part. For more information about how checksums are calculated with multipart + /// uploads, see + /// Checking object integrity in the Amazon S3 User Guide. 
+ /// + /// + public string ChecksumCRC32C + { + get { return this._checksumCRC32C; } + set { this._checksumCRC32C = value; } + } + + /// + /// Checks if ChecksumCRC32C property is set. + /// + /// true if ChecksumCRC32C property is set. + internal bool IsSetChecksumCRC32C() + { + return !string.IsNullOrEmpty(this._checksumCRC32C); + } + + /// + /// Gets and sets the property ChecksumCRC64NVME. + /// + /// The Base64 encoded, 64-bit CRC-64NVME checksum of the object. This header is present + /// if it was uploaded with the CRC-64NVME checksum algorithm, or if it was uploaded + /// without a checksum (and Amazon S3 added the default checksum, CRC-64NVME, to the uploaded object). + /// For more information about how checksums are calculated with multipart + /// uploads, see + /// Checking object integrity in the Amazon S3 User Guide. + /// + /// + public string ChecksumCRC64NVME + { + get { return this._checksumCRC64NVME; } + set { this._checksumCRC64NVME = value; } + } + + /// + /// Checks if ChecksumCRC64NVME property is set. + /// + /// true if ChecksumCRC64NVME property is set. + internal bool IsSetChecksumCRC64NVME() + { + return !string.IsNullOrEmpty(this._checksumCRC64NVME); + } + + /// + /// Gets and sets the property ChecksumSHA1. + /// + /// The Base64 encoded, 160-bit SHA-1 digest of the object. This will only be present + /// if it was uploaded with the object. When you use the API operation on an object that + /// was uploaded using multipart uploads, this value may not be a direct checksum value + /// of the full object. Instead, it's a calculation based on the checksum values of each + /// individual part. For more information about how checksums are calculated with multipart + /// uploads, see + /// Checking object integrity in the Amazon S3 User Guide. + /// + /// + public string ChecksumSHA1 + { + get { return this._checksumSHA1; } + set { this._checksumSHA1 = value; } + } + + /// + /// Checks if ChecksumSHA1 property is set. + /// + /// true if ChecksumSHA1 property is set. + internal bool IsSetChecksumSHA1() + { + return !string.IsNullOrEmpty(this._checksumSHA1); + } + + /// + /// Gets and sets the property ChecksumSHA256. + /// + /// The Base64 encoded, 256-bit SHA-256 digest of the object. This will only be present + /// if it was uploaded with the object. When you use an API operation on an object that + /// was uploaded using multipart uploads, this value may not be a direct checksum value + /// of the full object. Instead, it's a calculation based on the checksum values of each + /// individual part. For more information about how checksums are calculated with multipart + /// uploads, see + /// Checking object integrity in the Amazon S3 User Guide. + /// + /// + public string ChecksumSHA256 + { + get { return this._checksumSHA256; } + set { this._checksumSHA256 = value; } + } + + /// + /// Checks if ChecksumSHA256 property is set. + /// + /// true if ChecksumSHA256 property is set. + internal bool IsSetChecksumSHA256() + { + return !string.IsNullOrEmpty(this._checksumSHA256); + } + + /// + /// Gets and sets the property ChecksumType. + /// + /// This header specifies the checksum type of the object, which determines how part-level + /// checksums are combined to create an object-level checksum for multipart objects. For + /// PutObject uploads, the checksum type is always FULL_OBJECT. You can use + /// this header as a data integrity check to verify that the checksum type that is received + /// is the same checksum that was specified. 
For more information, + /// see + /// Checking object integrity in the Amazon S3 User Guide. + /// + /// + public ChecksumType ChecksumType + { + get { return this._checksumType; } + set { this._checksumType = value; } + } + + /// + /// Checks if ChecksumType property is set. + /// + /// true if ChecksumType property is set. + internal bool IsSetChecksumType() + { + return this._checksumType != null; + } + + /// + /// Gets and sets the property ETag. + /// + /// Entity tag for the uploaded object. + /// + /// + /// + /// General purpose buckets - To ensure that data is not corrupted traversing + /// the network, for objects where the ETag is the MD5 digest of the object, you can calculate + /// the MD5 while putting an object to Amazon S3 and compare the returned ETag to the + /// calculated MD5 value. + /// + /// + /// + /// Directory buckets - The ETag for the object in a directory bucket isn't the + /// MD5 digest of the object. + /// + /// + public string ETag + { + get { return this._etag; } + set { this._etag = value; } + } + + /// + /// Checks if ETag property is set. + /// + /// true if ETag property is set. + internal bool IsSetETag() + { + return !string.IsNullOrEmpty(this._etag); + } + + /// + /// Gets and sets the property Expiration. + /// + /// If the object expiration is configured, this will contain the expiration date (expiry-date) + /// and rule ID (rule-id). The value of rule-id is URL encoded. + /// + /// + /// Object expiration information is not returned for directory buckets (for those, the + /// response header will contain the value "NotImplemented"). + /// + /// + public Expiration Expiration + { + get { return this._expiration; } + set { this._expiration = value; } + } + + /// + /// Checks if Expiration property is set. + /// + /// true if Expiration property is set. + internal bool IsSetExpiration() + { + return this._expiration != null; + } + + /// + /// Gets and sets the property RequestCharged. + /// + /// If present, indicates that the requester was successfully charged for the request. + /// + /// + public RequestCharged RequestCharged + { + get { return this._requestCharged; } + set { this._requestCharged = value; } + } + + /// + /// Checks if RequestCharged property is set. + /// + /// true if RequestCharged property is set. + internal bool IsSetRequestCharged() + { + return this._requestCharged != null; + } + + /// + /// The Server-side encryption algorithm to be used with the customer provided key. + /// + /// + /// This functionality is not supported for directory buckets. + /// + /// + /// + public ServerSideEncryptionCustomerMethod ServerSideEncryptionCustomerMethod + { + get { return this._serverSideEncryptionCustomerMethod; } + set { this._serverSideEncryptionCustomerMethod = value; } + } + + /// + /// Checks if ServerSideEncryptionCustomerMethod property is set. + /// + /// true if ServerSideEncryptionCustomerMethod property is set. + internal bool IsSetServerSideEncryptionCustomerMethod() + { + return this._serverSideEncryptionCustomerMethod != null; + } + + /// + /// The MD5 of the customer encryption key specified in the ServerSideEncryptionCustomerProvidedKey property. The MD5 is + /// base 64 encoded. This field is optional, the SDK will calculate the MD5 if this is not set. + /// + /// + /// This functionality is not supported for directory buckets. 
+ /// + /// + /// + public string ServerSideEncryptionCustomerProvidedKeyMD5 + { + get { return this._sseCustomerKeyMD5; } + set { this._sseCustomerKeyMD5 = value; } + } + + /// + /// Checks if ServerSideEncryptionCustomerProvidedKeyMD5 property is set. + /// + /// true if ServerSideEncryptionCustomerProvidedKeyMD5 property is set. + internal bool IsSetServerSideEncryptionCustomerProvidedKeyMD5() + { + return !string.IsNullOrEmpty(this._sseCustomerKeyMD5); + } + + /// + /// + /// If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. + /// The value of this header is a Base64 encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. + /// This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future GetObject operations on this object. + /// + /// + [AWSProperty(Sensitive=true)] + public string ServerSideEncryptionKeyManagementServiceEncryptionContext + { + get { return this._sseKmsEncryptionContext; } + set { this._sseKmsEncryptionContext = value; } + } + + /// + /// Checks if ServerSideEncryptionKeyManagementServiceEncryptionContext property is set. + /// + /// true if ServerSideEncryptionKeyManagementServiceEncryptionContext property is set. + internal bool IsSetServerSideEncryptionKeyManagementServiceEncryptionContext() + { + return !string.IsNullOrEmpty(this._sseKmsEncryptionContext); + } + + /// + /// + /// If present, indicates the ID of the KMS key that was used for object encryption. + /// + /// + [AWSProperty(Sensitive=true)] + public string ServerSideEncryptionKeyManagementServiceKeyId + { + get { return this._sseKmsKeyId; } + set { this._sseKmsKeyId = value; } + } + + /// + /// Checks if ServerSideEncryptionKeyManagementServiceKeyId property is set. + /// + /// true if ServerSideEncryptionKeyManagementServiceKeyId property is set. + internal bool IsSetServerSideEncryptionKeyManagementServiceKeyId() + { + return !string.IsNullOrEmpty(this._sseKmsKeyId); + } + + /// + /// + /// The server-side encryption algorithm used when you store this object in Amazon S3 or Amazon FSx. + /// + /// + /// + /// When accessing data stored in Amazon FSx file systems using S3 access points, the only valid server side encryption option is aws:fsx. + /// + /// + /// + public ServerSideEncryptionMethod ServerSideEncryptionMethod + { + get { return this._serverSideEncryption; } + set { this._serverSideEncryption = value; } + } + + /// + /// Checks if ServerSideEncryptionMethod property is set. + /// + /// true if ServerSideEncryptionMethod property is set. + internal bool IsSetServerSideEncryptionMethod() + { + return this._serverSideEncryption != null; + } + + /// + /// Gets and sets the property VersionId. + /// + /// Version ID of the object. + /// + /// + /// + /// If you enable versioning for a bucket, Amazon S3 automatically generates a unique + /// version ID for the object being stored. Amazon S3 returns this ID in the response. + /// When you enable versioning for a bucket, if Amazon S3 receives multiple write requests + /// for the same object simultaneously, it stores all of the objects. For more information + /// about versioning, see Adding + /// Objects to Versioning-Enabled Buckets in the Amazon S3 User Guide. For + /// information about returning the versioning state of a bucket, see GetBucketVersioning. + /// + /// + /// + /// + /// This functionality is not supported for directory buckets. 
+ /// + /// + /// + public string VersionId + { + get { return this._versionId; } + set { this._versionId = value; } + } + + /// + /// Checks if VersionId property is set. + /// + /// true if VersionId property is set. + internal bool IsSetVersionId() + { + return !string.IsNullOrEmpty(this._versionId); + } + } +} diff --git a/sdk/src/Services/S3/Properties/AssemblyInfo.cs b/sdk/src/Services/S3/Properties/AssemblyInfo.cs index 5020e3525054..2cc125d196c9 100644 --- a/sdk/src/Services/S3/Properties/AssemblyInfo.cs +++ b/sdk/src/Services/S3/Properties/AssemblyInfo.cs @@ -19,6 +19,8 @@ #error Unknown platform constant - unable to set correct AssemblyDescription #endif +[assembly: InternalsVisibleTo("AWSSDK.UnitTests.S3.NetFramework, PublicKey=0024000004800000940000000602000000240000525341310004000001000100db5f59f098d27276c7833875a6263a3cc74ab17ba9a9df0b52aedbe7252745db7274d5271fd79c1f08f668ecfa8eaab5626fa76adc811d3c8fc55859b0d09d3bc0a84eecd0ba891f2b8a2fc55141cdcc37c2053d53491e650a479967c3622762977900eddbf1252ed08a2413f00a28f3a0752a81203f03ccb7f684db373518b4")] +[assembly: InternalsVisibleTo("AWSSDK.UnitTests.NetFramework, PublicKey=0024000004800000940000000602000000240000525341310004000001000100db5f59f098d27276c7833875a6263a3cc74ab17ba9a9df0b52aedbe7252745db7274d5271fd79c1f08f668ecfa8eaab5626fa76adc811d3c8fc55859b0d09d3bc0a84eecd0ba891f2b8a2fc55141cdcc37c2053d53491e650a479967c3622762977900eddbf1252ed08a2413f00a28f3a0752a81203f03ccb7f684db373518b4")] [assembly: AssemblyConfiguration("")] [assembly: AssemblyProduct("Amazon Web Services SDK for .NET")] [assembly: AssemblyCompany("Amazon.com, Inc")] diff --git a/sdk/test/Services/S3/UnitTests/AWSSDK.UnitTests.S3.NetFramework.csproj b/sdk/test/Services/S3/UnitTests/AWSSDK.UnitTests.S3.NetFramework.csproj index 3b58730ba499..9a461a902882 100644 --- a/sdk/test/Services/S3/UnitTests/AWSSDK.UnitTests.S3.NetFramework.csproj +++ b/sdk/test/Services/S3/UnitTests/AWSSDK.UnitTests.S3.NetFramework.csproj @@ -75,5 +75,8 @@ + + + \ No newline at end of file diff --git a/sdk/test/Services/S3/UnitTests/Custom/EmbeddedResource/mapping.json b/sdk/test/Services/S3/UnitTests/Custom/EmbeddedResource/mapping.json new file mode 100644 index 000000000000..224a0a35dfdb --- /dev/null +++ b/sdk/test/Services/S3/UnitTests/Custom/EmbeddedResource/mapping.json @@ -0,0 +1,291 @@ +{ + "Definition": { + "UploadRequest": { + "PutObjectRequest": [ + "ACL", + "Bucket", + "BucketKeyEnabled", + "CacheControl", + "ChecksumAlgorithm", + "ChecksumCRC32", + "ChecksumCRC32C", + "ChecksumCRC64NVME", + "ChecksumSHA1", + "ChecksumSHA256", + "ContentDisposition", + "ContentEncoding", + "ContentLanguage", + "ContentType", + "ExpectedBucketOwner", + "Expires", + "GrantFullControl", + "GrantRead", + "GrantReadACP", + "GrantWriteACP", + "IfMatch", + "IfNoneMatch", + "Key", + "Metadata", + "ObjectLockLegalHoldStatus", + "ObjectLockMode", + "ObjectLockRetainUntilDate", + "RequestPayer", + "SSECustomerAlgorithm", + "SSECustomerKey", + "SSECustomerKeyMD5", + "SSEKMSEncryptionContext", + "SSEKMSKeyId", + "ServerSideEncryption", + "StorageClass", + "Tagging", + "WebsiteRedirectLocation" + ] + }, + "UploadResponse": { + "PutObjectResponse": [ + "BucketKeyEnabled", + "ChecksumCRC32", + "ChecksumCRC32C", + "ChecksumCRC64NVME", + "ChecksumSHA1", + "ChecksumSHA256", + "ChecksumType", + "ETag", + "Expiration", + "RequestCharged", + "SSECustomerAlgorithm", + "SSECustomerKeyMD5", + "SSEKMSEncryptionContext", + "SSEKMSKeyId", + "ServerSideEncryption", + "VersionId" + ] + }, + "DownloadRequest": { + 
"GetObjectRequest": [ + "Bucket", + "ChecksumMode", + "ExpectedBucketOwner", + "IfMatch", + "IfModifiedSince", + "IfNoneMatch", + "IfUnmodifiedSince", + "Key", + "RequestPayer", + "ResponseCacheControl", + "ResponseContentDisposition", + "ResponseContentEncoding", + "ResponseContentLanguage", + "ResponseContentType", + "ResponseExpires", + "SSECustomerAlgorithm", + "SSECustomerKey", + "SSECustomerKeyMD5", + "VersionId" + ] + }, + "DownloadResponse": { + "GetObjectResponse": [ + "AcceptRanges", + "BucketKeyEnabled", + "CacheControl", + "ChecksumCRC32", + "ChecksumCRC32C", + "ChecksumCRC64NVME", + "ChecksumSHA1", + "ChecksumSHA256", + "ChecksumType", + "ContentDisposition", + "ContentEncoding", + "ContentLanguage", + "ContentLength", + "ContentRange", + "ContentType", + "DeleteMarker", + "ETag", + "Expiration", + "Expires", + "LastModified", + "Metadata", + "MissingMeta", + "ObjectLockLegalHoldStatus", + "ObjectLockMode", + "ObjectLockRetainUntilDate", + "PartsCount", + "ReplicationStatus", + "RequestCharged", + "Restore", + "SSECustomerAlgorithm", + "SSECustomerKeyMD5", + "SSEKMSKeyId", + "ServerSideEncryption", + "StorageClass", + "TagCount", + "VersionId", + "WebsiteRedirectLocation" + ] + } + }, + "Conversion": { + "UploadRequest": { + "PutObjectRequest": [ + "Bucket", + "ChecksumAlgorithm", + "ChecksumCRC32", + "ChecksumCRC32C", + "ChecksumCRC64NVME", + "ChecksumSHA1", + "ChecksumSHA256", + "ExpectedBucketOwner", + "Key", + "RequestPayer", + "SSECustomerAlgorithm", + "SSECustomerKey", + "SSECustomerKeyMD5" + ], + "CreateMultipartRequest": [ + "ACL", + "Bucket", + "BucketKeyEnabled", + "CacheControl", + "ChecksumAlgorithm", + "ContentDisposition", + "ContentEncoding", + "ContentLanguage", + "ContentType", + "ExpectedBucketOwner", + "Expires", + "GrantFullControl", + "GrantRead", + "GrantReadACP", + "GrantWriteACP", + "Key", + "Metadata", + "ObjectLockLegalHoldStatus", + "ObjectLockMode", + "ObjectLockRetainUntilDate", + "RequestPayer", + "SSECustomerAlgorithm", + "SSECustomerKey", + "SSECustomerKeyMD5", + "SSEKMSEncryptionContext", + "SSEKMSKeyId", + "ServerSideEncryption", + "StorageClass", + "Tagging", + "WebsiteRedirectLocation" + ], + "UploadPartRequest": [ + "Bucket", + "ChecksumAlgorithm", + "ExpectedBucketOwner", + "Key", + "RequestPayer", + "SSECustomerAlgorithm", + "SSECustomerKey", + "SSECustomerKeyMD5" + ], + "CompleteMultipartRequest": [ + "Bucket", + "ChecksumCRC32", + "ChecksumCRC32C", + "ChecksumCRC64NVME", + "ChecksumSHA1", + "ChecksumSHA256", + "ExpectedBucketOwner", + "IfMatch", + "IfNoneMatch", + "Key", + "RequestPayer", + "SSECustomerAlgorithm", + "SSECustomerKey", + "SSECustomerKeyMD5" + ], + "AbortMultipartRequest": [ + "Bucket", + "ExpectedBucketOwner", + "Key", + "RequestPayer" + ] + }, + "CompleteMultipartResponse": { + "UploadResponse": [ + "BucketKeyEnabled", + "ChecksumCRC32", + "ChecksumCRC32C", + "ChecksumCRC64NVME", + "ChecksumSHA1", + "ChecksumSHA256", + "ChecksumType", + "ETag", + "Expiration", + "RequestCharged", + "SSEKMSKeyId", + "ServerSideEncryption", + "VersionId" + ] + }, + "PutObjectResponse": { + "UploadResponse": [ + "BucketKeyEnabled", + "ChecksumCRC32", + "ChecksumCRC32C", + "ChecksumCRC64NVME", + "ChecksumSHA1", + "ChecksumSHA256", + "ChecksumType", + "ETag", + "Expiration", + "RequestCharged", + "SSECustomerAlgorithm", + "SSECustomerKeyMD5", + "SSEKMSEncryptionContext", + "SSEKMSKeyId", + "ServerSideEncryption", + "VersionId" + ] + }, + "GetObjectResponse": { + "DownloadResponse": [ + "AcceptRanges", + "BucketKeyEnabled", + "CacheControl", + 
"ChecksumCRC32", + "ChecksumCRC32C", + "ChecksumCRC64NVME", + "ChecksumSHA1", + "ChecksumSHA256", + "ChecksumType", + "ContentDisposition", + "ContentEncoding", + "ContentLanguage", + "ContentLength", + "ContentRange", + "ContentType", + "DeleteMarker", + "ETag", + "Expiration", + "Expires", + "ExpiresString", + "LastModified", + "Metadata", + "MissingMeta", + "ObjectLockLegalHoldStatus", + "ObjectLockMode", + "ObjectLockRetainUntilDate", + "PartsCount", + "ReplicationStatus", + "RequestCharged", + "Restore", + "SSECustomerAlgorithm", + "SSECustomerKeyMD5", + "SSEKMSKeyId", + "ServerSideEncryption", + "StorageClass", + "TagCount", + "VersionId", + "WebsiteRedirectLocation" + ] + } + } +} \ No newline at end of file diff --git a/sdk/test/Services/S3/UnitTests/Custom/EmbeddedResource/property-aliases.json b/sdk/test/Services/S3/UnitTests/Custom/EmbeddedResource/property-aliases.json new file mode 100644 index 000000000000..245790b5fdbe --- /dev/null +++ b/sdk/test/Services/S3/UnitTests/Custom/EmbeddedResource/property-aliases.json @@ -0,0 +1,10 @@ +{ + "PropertyAliases": { + "SSECustomerAlgorithm": "ServerSideEncryptionCustomerMethod", + "SSECustomerKeyMD5": "ServerSideEncryptionCustomerProvidedKeyMD5", + "SSEKMSKeyId": "ServerSideEncryptionKeyManagementServiceKeyId", + "ServerSideEncryption": "ServerSideEncryptionMethod", + "SSEKMSEncryptionContext": "ServerSideEncryptionKeyManagementServiceEncryptionContext", + "Restore": "RestoreExpiration" + } +} diff --git a/sdk/test/Services/S3/UnitTests/Custom/ResponseMapperTests.cs b/sdk/test/Services/S3/UnitTests/Custom/ResponseMapperTests.cs new file mode 100644 index 000000000000..ce4960e85985 --- /dev/null +++ b/sdk/test/Services/S3/UnitTests/Custom/ResponseMapperTests.cs @@ -0,0 +1,458 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +using Amazon.Runtime; +using Amazon.S3; +using Amazon.S3.Model; +using Amazon.S3.Transfer; +using Amazon.S3.Transfer.Internal; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Net; +using System.Reflection; +using System.Text.Json; + +namespace AWSSDK.UnitTests +{ + [TestClass] + public class ResponseMapperTests + { + private static JsonDocument _mappingJson; + private static JsonDocument _propertyAliasesJson; + private static Dictionary _propertyAliases; + + [ClassInitialize] + public static void ClassInitialize(TestContext context) + { + // Read mapping.json using robust resource loading (same pattern as Utils.cs) + using (var stream = GetResourceStream("mapping.json")) + { + if (stream == null) + { + throw new FileNotFoundException("Could not find embedded resource: mapping.json"); + } + + using (var reader = new StreamReader(stream)) + { + var jsonContent = reader.ReadToEnd(); + _mappingJson = JsonDocument.Parse(jsonContent); + } + } + + // Read property-aliases.json using robust resource loading + using (var stream = GetResourceStream("property-aliases.json")) + { + if (stream != null) + { + using (var reader = new StreamReader(stream)) + { + var aliasContent = reader.ReadToEnd(); + _propertyAliasesJson = JsonDocument.Parse(aliasContent); + + // Convert to dictionary for fast lookup + _propertyAliases = new Dictionary(); + var aliasesElement = _propertyAliasesJson.RootElement.GetProperty("PropertyAliases"); + foreach (var alias in aliasesElement.EnumerateObject()) + { + _propertyAliases[alias.Name] = alias.Value.GetString(); + } + } + } + else + { + _propertyAliases = new Dictionary(); + } + } + } + + /// + /// Gets embedded resource stream using partial name matching (same pattern as Utils.cs) + /// + private static Stream GetResourceStream(string resourceName) + { + Assembly assembly = Assembly.GetExecutingAssembly(); + var resource = FindResourceName(assembly, resourceName); + if(resource == null) + { + assembly = Assembly.GetCallingAssembly(); + resource = FindResourceName(assembly, resourceName); + } + + return resource != null ? 
assembly.GetManifestResourceStream(resource) : null; + } + + /// + /// Finds resource name using case-insensitive partial matching (same pattern as Utils.cs) + /// + private static string FindResourceName(Assembly assembly, string partialName) + { + var resources = FindResourceName(assembly, s => s.IndexOf(partialName, StringComparison.OrdinalIgnoreCase) >= 0); + return resources.FirstOrDefault(); + } + + /// + /// Finds resource names matching predicate (same pattern as Utils.cs) + /// + private static IEnumerable FindResourceName(Assembly assembly, Predicate match) + { + var allResources = assembly.GetManifestResourceNames(); + foreach (var resource in allResources) + { + if (match(resource)) + yield return resource; + } + } + + [ClassCleanup] + public static void ClassCleanup() + { + _mappingJson?.Dispose(); + _propertyAliasesJson?.Dispose(); + } + + [TestMethod] + [TestCategory("S3")] + public void MapPutObjectResponse_AllMappedProperties_WorkCorrectly() + { + // Get the expected mappings from JSON + var putObjectMappings = _mappingJson.RootElement + .GetProperty("Conversion") + .GetProperty("PutObjectResponse") + .GetProperty("UploadResponse") + .EnumerateArray() + .Select(prop => prop.GetString()) + .ToList(); + + // Create source object with dynamically generated test data + var sourceResponse = new PutObjectResponse(); + var sourceType = typeof(PutObjectResponse); + var testDataValues = new Dictionary(); + + // Generate test data for each mapped property + foreach (var propertyName in putObjectMappings) + { + // Resolve alias to actual property name + var resolvedPropertyName = ResolvePropertyName(propertyName); + var sourceProperty = sourceType.GetProperty(resolvedPropertyName); + if (sourceProperty?.CanWrite == true) + { + var testValue = GenerateTestValue(sourceProperty.PropertyType, propertyName); + sourceProperty.SetValue(sourceResponse, testValue); + testDataValues[propertyName] = testValue; + } + } + + // Add inherited properties for comprehensive testing + sourceResponse.HttpStatusCode = HttpStatusCode.OK; + sourceResponse.ContentLength = 1024; + + // Map the response + var mappedResponse = ResponseMapper.MapPutObjectResponse(sourceResponse); + Assert.IsNotNull(mappedResponse, "Mapped response should not be null"); + + // Verify all mapped properties using reflection + var targetType = typeof(TransferUtilityUploadResponse); + var failedAssertions = new List(); + + foreach (var propertyName in putObjectMappings) + { + // Resolve alias to actual property name for reflection lookups + var resolvedPropertyName = ResolvePropertyName(propertyName); + var sourceProperty = sourceType.GetProperty(resolvedPropertyName); + var targetProperty = targetType.GetProperty(resolvedPropertyName); + + if (sourceProperty == null) + { + failedAssertions.Add($"Source property '{propertyName}' (resolved to: {resolvedPropertyName}) not found in PutObjectResponse"); + continue; + } + + if (targetProperty == null) + { + failedAssertions.Add($"Target property '{propertyName}' (resolved to: {resolvedPropertyName}) not found in TransferUtilityUploadResponse"); + continue; + } + + var sourceValue = sourceProperty.GetValue(sourceResponse); + var targetValue = targetProperty.GetValue(mappedResponse); + + // Special handling for complex object comparisons + if (!AreValuesEqual(sourceValue, targetValue)) + { + failedAssertions.Add($"{propertyName}: Expected '{sourceValue ?? "null"}', got '{targetValue ?? 
"null"}'"); + } + } + + // Test inherited properties + Assert.AreEqual(sourceResponse.HttpStatusCode, mappedResponse.HttpStatusCode, "HttpStatusCode should match"); + Assert.AreEqual(sourceResponse.ContentLength, mappedResponse.ContentLength, "ContentLength should match"); + + // Report any failures + if (failedAssertions.Any()) + { + Assert.Fail($"Property mapping failures:\n{string.Join("\n", failedAssertions)}"); + } + } + + [TestMethod] + [TestCategory("S3")] + public void MapPutObjectResponse_NullValues_HandledCorrectly() + { + // Test null handling scenarios + var testCases = new[] + { + // Test null Expiration + new PutObjectResponse { Expiration = null }, + + // Test null enum conversions + new PutObjectResponse { ChecksumType = null, RequestCharged = null, ServerSideEncryptionMethod = null } + }; + + foreach (var testCase in testCases) + { + var mapped = ResponseMapper.MapPutObjectResponse(testCase); + Assert.IsNotNull(mapped, "Response should always be mappable"); + + // Test null handling + if (testCase.Expiration == null) + { + Assert.IsNull(mapped.Expiration, "Null Expiration should map to null"); + } + } + } + + [TestMethod] + [TestCategory("S3")] + public void ValidateTransferUtilityUploadResponseDefinitionCompleteness() + { + ValidateResponseDefinitionCompleteness( + new[] { "Definition", "UploadResponse", "PutObjectResponse" }, + "TransferUtilityUploadResponse"); + } + + [TestMethod] + [TestCategory("S3")] + public void ValidateCompleteMultipartUploadResponseConversionCompleteness() + { + ValidateResponseDefinitionCompleteness( + new[] { "Conversion", "CompleteMultipartResponse", "UploadResponse" }, + "TransferUtilityUploadResponse"); + } + + // Uncomment for DOTNET-8277 + + // [TestMethod] + // [TestCategory("S3")] + // public void ValidatePutObjectRequestDefinitionCompleteness() + // { + // ValidateResponseDefinitionCompleteness( + // new[] { "Definition", "UploadRequest", "PutObjectRequest" }, + // "PutObjectRequest"); + // } + + // [TestMethod] + // [TestCategory("S3")] + // public void ValidateGetObjectRequestDefinitionCompleteness() + // { + // ValidateResponseDefinitionCompleteness( + // new[] { "Definition", "DownloadRequest", "GetObjectRequest" }, + // "GetObjectRequest"); + // } + + // [TestMethod] + // [TestCategory("S3")] + // public void ValidateGetObjectRequestDefinitionCompleteness() + // { + // ValidateResponseDefinitionCompleteness( + // new[] { "Definition", "DownloadRequest", "GetObjectRequest" }, + // "TransferUtilityDownloadRequest"); + // } + + /// + /// Generates appropriate test data for a given property type + /// + private static object GenerateTestValue(Type propertyType, string propertyName) + { + // Handle nullable types + if (propertyType.IsGenericType && propertyType.GetGenericTypeDefinition() == typeof(Nullable<>)) + { + var underlyingType = Nullable.GetUnderlyingType(propertyType); + return GenerateTestValue(underlyingType, propertyName); + } + + // String properties + if (propertyType == typeof(string)) + { + return $"test-{propertyName.ToLower()}"; + } + + // Boolean properties + if (propertyType == typeof(bool)) + { + return true; + } + + // Enum properties + if (propertyType.IsEnum) + { + // For all enums, use the first available value + var enumValues = Enum.GetValues(propertyType); + return enumValues.Length > 0 ? enumValues.GetValue(0) : + throw new InvalidOperationException($"Enum {propertyType.Name} has no values"); + } + + // AWS SDK ConstantClass properties (like ChecksumType, RequestCharged, etc.) 
+ if (typeof(ConstantClass).IsAssignableFrom(propertyType)) + { + // Use reflection to get static readonly fields that are of the same type + var constantFields = propertyType.GetFields(BindingFlags.Public | BindingFlags.Static) + .Where(f => f.IsStatic && f.IsInitOnly && f.FieldType == propertyType); + + var firstConstant = constantFields.FirstOrDefault(); + return firstConstant?.GetValue(null) ?? + throw new InvalidOperationException($"ConstantClass {propertyType.Name} has no static constants"); + } + + // Special object types + if (propertyType == typeof(Expiration)) + { + return new Expiration + { + ExpiryDate = DateTime.UtcNow.AddDays(30), + RuleId = "test-expiration-rule" + }; + } + + // Integer types + if (propertyType == typeof(int) || propertyType == typeof(long)) + { + return 1024; + } + + // For unknown types, throw an exception instead of returning null + // If we've reached this point it means there is an unhandled scenario/missing mapping in our test code that we need to handle. + throw new NotSupportedException( + $"GenerateTestValue does not support type '{propertyType.FullName}' for property '{propertyName}'. " + + $"Please add support for this type to ensure comprehensive test coverage."); + } + + /// + /// Compares two values for equality with special handling for complex objects + /// + private static bool AreValuesEqual(object sourceValue, object targetValue) + { + // Both null + if (sourceValue == null && targetValue == null) + return true; + + // One null, other not + if (sourceValue == null || targetValue == null) + return false; + + // Special handling for Expiration objects + if (sourceValue is Expiration sourceExpiration && targetValue is Expiration targetExpiration) + { + return sourceExpiration.ExpiryDate == targetExpiration.ExpiryDate && + sourceExpiration.RuleId == targetExpiration.RuleId; + } + + // For most cases, use default equality + return sourceValue.Equals(targetValue); + } + + /// + /// Resolves a property name to its actual class property name, checking aliases if needed + /// + private static string ResolvePropertyName(string propertyName) + { + // Check if there's an alias for this property name + if (_propertyAliases.TryGetValue(propertyName, out var aliasedName)) + { + return aliasedName; + } + + // Return the original name if no alias exists + return propertyName; + } + + /// + /// Generic helper method to validate response definition completeness. + /// This method ensures that all properties defined in mapping.json actually exist + /// in the corresponding AWS SDK response classes, supporting property name aliases + /// for backwards compatibility and maintainability. + /// + private static void ValidateResponseDefinitionCompleteness( + string[] jsonPath, + string responseTypeName, + Func> getAdditionalProperties = null) + { + // Get direct properties from response class + var directProperties = typeof(TResponse) + .GetProperties(BindingFlags.Public | BindingFlags.Instance) + .Where(p => p.CanRead) + .Select(p => p.Name) + .ToList(); + + // Get additional properties if provided (e.g., HeadersCollection properties) + var additionalProperties = getAdditionalProperties?.Invoke()?.ToList() ?? 
new List(); + + // Combine direct and additional properties + var actualProperties = directProperties.Union(additionalProperties) + .OrderBy(name => name) + .ToList(); + + // Navigate to the JSON definition using the provided path + var jsonElement = _mappingJson.RootElement; + foreach (var pathSegment in jsonPath) + { + jsonElement = jsonElement.GetProperty(pathSegment); + } + + var definitionProperties = jsonElement + .EnumerateArray() + .Select(prop => prop.GetString()) + .OrderBy(name => name) + .ToList(); + + // Check each definition property, resolving aliases as needed + var extraInDefinition = new List(); + + foreach (var definitionProperty in definitionProperties) + { + var resolvedPropertyName = ResolvePropertyName(definitionProperty); + + // Check if the resolved property name exists in the actual class + if (!actualProperties.Contains(resolvedPropertyName)) + { + extraInDefinition.Add($"{definitionProperty} (resolved to: {resolvedPropertyName})"); + } + } + + // Assert no extra properties + if (extraInDefinition.Any()) + { + var additionalContext = additionalProperties.Any() + ? $" or additional properties" + : ""; + + Assert.Fail($"Definition section contains {extraInDefinition.Count} extra properties that don't exist in the actual {responseTypeName} class{additionalContext}: {string.Join(", ", extraInDefinition)}. " + + $"Please verify they exist in the response class{additionalContext}."); + } + } + } +} diff --git a/sdk/test/UnitTests/AWSSDK.UnitTests.NetFramework.csproj b/sdk/test/UnitTests/AWSSDK.UnitTests.NetFramework.csproj index 5d1c7e6f8ba0..b6f3c88dfff9 100644 --- a/sdk/test/UnitTests/AWSSDK.UnitTests.NetFramework.csproj +++ b/sdk/test/UnitTests/AWSSDK.UnitTests.NetFramework.csproj @@ -84,6 +84,7 @@ + \ No newline at end of file From 6212c3bd65d83ec4753ac63f0610a0ca4d7c7b98 Mon Sep 17 00:00:00 2001 From: Philippe El Asmar <53088140+philasmar@users.noreply.github.com> Date: Tue, 21 Oct 2025 19:18:37 -0400 Subject: [PATCH 03/56] Add missing fields to Transfer Utility request objects (#4056) --- .../9d07dc1e-d82d-4f94-8700-c7b57f87205d.json | 11 + .../S3/Custom/Model/HeadersCollection.cs | 15 + .../S3/Custom/Transfer/BaseDownloadRequest.cs | 116 +++- .../S3/Custom/Transfer/BaseUploadRequest.cs | 505 ++++++++++++++++++ .../Internal/AbortMultipartUploadsCommand.cs | 4 +- .../Custom/Transfer/Internal/BaseCommand.cs | 17 +- .../Internal/DownloadDirectoryCommand.cs | 10 +- .../Internal/MultipartUploadCommand.cs | 26 +- .../Transfer/Internal/OpenStreamCommand.cs | 2 +- .../Transfer/Internal/SimpleUploadCommand.cs | 9 +- .../Internal/UploadDirectoryCommand.cs | 8 +- .../_async/MultipartUploadCommand.async.cs | 20 +- ...TransferUtilityDownloadDirectoryRequest.cs | 107 ++++ .../TransferUtilityUploadDirectoryRequest.cs | 280 ---------- .../Transfer/TransferUtilityUploadRequest.cs | 335 ------------ .../EmbeddedResource/property-aliases.json | 121 ++++- .../UnitTests/Custom/ResponseMapperTests.cs | 481 +++++++++++++---- 17 files changed, 1319 insertions(+), 748 deletions(-) create mode 100644 generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f87205d.json diff --git a/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f87205d.json b/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f87205d.json new file mode 100644 index 000000000000..9b7c13a4f5ab --- /dev/null +++ b/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f87205d.json @@ -0,0 +1,11 @@ +{ + "services": [ + { + "serviceName": "S3", + "type": "patch", + "changeLogMessages": [ + "Add missing fields to Transfer Utility 
request objects. ContentType on TransferUtilityUploadRequest and TransferUtilityUploadDirectoryRequest now directly updates the ContentType header, instead of being a separate field on those objects." + ] + } + ] +} \ No newline at end of file diff --git a/sdk/src/Services/S3/Custom/Model/HeadersCollection.cs b/sdk/src/Services/S3/Custom/Model/HeadersCollection.cs index ed2945257f9b..32e712cf5018 100644 --- a/sdk/src/Services/S3/Custom/Model/HeadersCollection.cs +++ b/sdk/src/Services/S3/Custom/Model/HeadersCollection.cs @@ -152,6 +152,21 @@ internal bool IsSetContentType() return !string.IsNullOrEmpty(this.ContentType); } + /// + /// The language that the content is in. For more information, + /// see https://www.rfc-editor.org/rfc/rfc9110.html#name-content-language. + /// + public string ContentLanguage + { + get { return this["Content-Language"]; } + set { this["Content-Language"] = value; } + } + + internal bool IsSetContentLanguage() + { + return !string.IsNullOrEmpty(this.ContentLanguage); + } + /// /// /// The date and time at which the object is no longer cacheable. For more information, diff --git a/sdk/src/Services/S3/Custom/Transfer/BaseDownloadRequest.cs b/sdk/src/Services/S3/Custom/Transfer/BaseDownloadRequest.cs index d29a3747f24a..84bc08c29225 100644 --- a/sdk/src/Services/S3/Custom/Transfer/BaseDownloadRequest.cs +++ b/sdk/src/Services/S3/Custom/Transfer/BaseDownloadRequest.cs @@ -24,6 +24,7 @@ using System.Text; using Amazon.Runtime.Internal; +using Amazon.S3.Model; namespace Amazon.S3.Transfer { @@ -45,6 +46,11 @@ public abstract class BaseDownloadRequest private RequestPayer requestPayer; + private string expectedBucketOwner; + private string ifMatch; + private string ifNoneMatch; + private ResponseHeaderOverrides responseHeaders; + /// /// Gets or sets the name of the bucket. /// @@ -66,7 +72,7 @@ public string BucketName /// internal bool IsSetBucketName() { - return !System.String.IsNullOrEmpty(this.bucketName); + return !String.IsNullOrEmpty(this.bucketName); } @@ -91,7 +97,7 @@ public string Key /// internal bool IsSetKey() { - return !System.String.IsNullOrEmpty(this.key); + return !String.IsNullOrEmpty(this.key); } /// @@ -112,7 +118,7 @@ public string VersionId /// true if VersionId property is set. internal bool IsSetVersionId() { - return !System.String.IsNullOrEmpty(this.versionId); + return !String.IsNullOrEmpty(this.versionId); } /// @@ -220,5 +226,109 @@ public RequestPayer RequestPayer get { return this.requestPayer; } set { this.requestPayer = value; } } + + /// + /// Gets and sets the property ExpectedBucketOwner. + /// + /// The account ID of the expected bucket owner. If the account ID that you provide does + /// not match the actual owner of the bucket, the request fails with the HTTP status code + /// 403 Forbidden (access denied). + /// + /// + public string ExpectedBucketOwner + { + get { return this.expectedBucketOwner; } + set { this.expectedBucketOwner = value; } + } + + /// + /// Checks to see if ExpectedBucketOwner is set. + /// + /// true, if ExpectedBucketOwner property is set. + internal bool IsSetExpectedBucketOwner() + { + return !String.IsNullOrEmpty(this.expectedBucketOwner); + } + + /// + /// Gets and sets the property IfMatch. + /// + /// Return the object only if its entity tag (ETag) is the same as the one specified in this header; + /// otherwise, return a 412 Precondition Failed error. 
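As a usage sketch for the conditional-request fields introduced on BaseDownloadRequest above: the snippet below is illustrative only and not part of the patch; the bucket, key, file path, and ETag value are hypothetical. It shows IfMatch guarding a TransferUtility download so that a concurrently modified object fails with 412 Precondition Failed instead of being fetched silently.

using System.Threading.Tasks;
using Amazon.S3;
using Amazon.S3.Transfer;

internal static class ConditionalDownloadExample
{
    public static async Task RunAsync(IAmazonS3 s3Client)
    {
        var transferUtility = new TransferUtility(s3Client);
        await transferUtility.DownloadAsync(new TransferUtilityDownloadRequest
        {
            BucketName = "amzn-s3-demo-bucket",              // hypothetical bucket
            Key = "reports/latest.csv",                      // hypothetical key
            FilePath = @"C:\temp\latest.csv",                // hypothetical local path
            IfMatch = "\"9b2cf535f27731c974343645a3985328\"" // expected ETag (hypothetical)
        });
        // If the stored object's ETag no longer matches, S3 returns
        // 412 Precondition Failed and the download fails fast.
    }
}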
+ /// + /// + /// If both of the If-Match and If-Unmodified-Since headers are present in the request as follows: + /// If-Match condition evaluates to true, and; If-Unmodified-Since condition evaluates to false; + /// then, S3 returns 200 OK and the data requested. + /// + /// + /// For more information about conditional requests, see RFC 7232. + /// + /// The property is equivalent to the . + /// + public string IfMatch + { + get { return this.ifMatch; } + set { this.ifMatch = value; } + } + + /// + /// Checks to see if IfMatch is set. + /// + /// true, if IfMatch property is set. + internal bool IsSetIfMatch() + { + return !String.IsNullOrEmpty(this.ifMatch); + } + + /// + /// Gets and sets the property IfNoneMatch. + /// + /// Return the object only if its entity tag (ETag) is different from the one specified in this header; + /// otherwise, return a 304 Not Modified error. + /// + /// + /// If both of the If-None-Match and If-Modified-Since headers are present in the request as follows: + /// If-None-Match condition evaluates to false, and; If-Modified-Since condition evaluates to true; + /// then, S3 returns 304 Not Modified HTTP status code. + /// + /// + /// For more information about conditional requests, see RFC 7232. + /// + /// The property is equivalent to the . + /// + public string IfNoneMatch + { + get { return this.ifNoneMatch; } + set { this.ifNoneMatch = value; } + } + + /// + /// Checks to see if IfNoneMatch is set. + /// + /// true, if IfNoneMatch property is set. + internal bool IsSetIfNoneMatch() + { + return !String.IsNullOrEmpty(this.ifNoneMatch); + } + + /// + /// A set of response headers that should be returned with the object. + /// + public ResponseHeaderOverrides ResponseHeaderOverrides + { + get + { + if (this.responseHeaders == null) + { + this.responseHeaders = new ResponseHeaderOverrides(); + } + return this.responseHeaders; + } + set + { + this.responseHeaders = value; + } + } } } \ No newline at end of file diff --git a/sdk/src/Services/S3/Custom/Transfer/BaseUploadRequest.cs b/sdk/src/Services/S3/Custom/Transfer/BaseUploadRequest.cs index 8a1cec4a957c..d087be435f4f 100644 --- a/sdk/src/Services/S3/Custom/Transfer/BaseUploadRequest.cs +++ b/sdk/src/Services/S3/Custom/Transfer/BaseUploadRequest.cs @@ -21,6 +21,9 @@ * */ using System; +using System.Collections.Generic; +using Amazon.Runtime.Internal; +using Amazon.S3.Model; namespace Amazon.S3.Transfer { @@ -29,7 +32,28 @@ namespace Amazon.S3.Transfer /// public abstract class BaseUploadRequest { + private string bucketName; private RequestPayer requestPayer; + private ServerSideEncryptionMethod encryption; + private ServerSideEncryptionCustomerMethod serverSideCustomerEncryption; + private string serverSideEncryptionCustomerProvidedKey; + private string serverSideEncryptionCustomerProvidedKeyMD5; + private string serverSideEncryptionKeyManagementServiceKeyId; + private ChecksumAlgorithm checksumAlgorithm; + private S3CannedACL cannedACL; + private S3StorageClass storageClass; + private MetadataCollection metadataCollection = new MetadataCollection(); + private List tagset; + private ObjectLockLegalHoldStatus objectLockLegalHoldStatus; + private ObjectLockMode objectLockMode; + private DateTime? objectLockRetainUntilDate; + private bool? disablePayloadSigning; + private bool? 
bucketKeyEnabled; + private string expectedBucketOwner; + private string sseKMSEncryptionContext; + private string websiteRedirectLocation; + private HeadersCollection headersCollection = new HeadersCollection(); + private List _grants = AWSConfigs.InitializeCollections ? new List() : null; /// /// Confirms that the requester knows that they will be charged for the request. @@ -40,5 +64,486 @@ public RequestPayer RequestPayer get { return this.requestPayer; } set { this.requestPayer = value; } } + + #region BucketName + + /// + /// Gets or sets the name of the bucket. + /// + /// + /// The name of the bucket. + /// + [AWSProperty(Required = true)] + public string BucketName + { + get { return this.bucketName; } + set { this.bucketName = value; } + } + + + /// + /// Checks if BucketName property is set. + /// + /// true if BucketName property is set. + internal bool IsSetBucketName() + { + return !String.IsNullOrEmpty(this.bucketName); + } + + #endregion + + #region ContentType + /// + /// Gets or sets the content type of the uploaded Amazon S3 object. + /// This is a convenience property for Headers.ContentType. + /// + /// The content type of the uploaded Amazon S3 object. + /// + /// + public string ContentType + { + get { return this.Headers.ContentType; } + set { this.Headers.ContentType = value; } + } + + + /// + /// Checks if ContentType property is set. + /// + /// true if ContentType property is set. + internal bool IsSetContentType() + { + return !String.IsNullOrEmpty(this.Headers.ContentType); + } + + #endregion + + #region ServerSideEncryption + + /// + /// Gets and sets the ServerSideEncryptionMethod property. + /// Specifies the encryption used on the server to + /// store the content. + /// + public ServerSideEncryptionMethod ServerSideEncryptionMethod + { + get { return this.encryption; } + set { this.encryption = value; } + } + + /// + /// The Server-side encryption algorithm to be used with the customer provided key. + /// + public ServerSideEncryptionCustomerMethod ServerSideEncryptionCustomerMethod + { + get { return this.serverSideCustomerEncryption; } + set { this.serverSideCustomerEncryption = value; } + } + + /// + /// The id of the AWS Key Management Service key that Amazon S3 should use to encrypt and decrypt the object. + /// If a key id is not specified, the default key will be used for encryption and decryption. + /// + [AWSProperty(Sensitive=true)] + public string ServerSideEncryptionKeyManagementServiceKeyId + { + get { return this.serverSideEncryptionKeyManagementServiceKeyId; } + set { this.serverSideEncryptionKeyManagementServiceKeyId = value; } + } + + /// + /// Checks if ServerSideEncryptionKeyManagementServiceKeyId property is set. + /// + /// true if ServerSideEncryptionKeyManagementServiceKeyId property is set. + internal bool IsSetServerSideEncryptionKeyManagementServiceKeyId() + { + return !String.IsNullOrEmpty(this.serverSideEncryptionKeyManagementServiceKeyId); + } + + /// + /// The Base64 encoded encryption key for Amazon S3 to use to encrypt the object + /// + /// Using the encryption key you provide as part of your request Amazon S3 manages both the encryption, as it writes + /// to disks, and decryption, when you access your objects. Therefore, you don't need to maintain any data encryption code. The only + /// thing you do is manage the encryption keys you provide. + /// + /// + /// When you retrieve an object, you must provide the same encryption key as part of your request. 
Amazon S3 first verifies + /// the encryption key you provided matches, and then decrypts the object before returning the object data to you. + /// + /// + /// Important: Amazon S3 does not store the encryption key you provide. + /// + /// + [AWSProperty(Sensitive=true)] + public string ServerSideEncryptionCustomerProvidedKey + { + get { return this.serverSideEncryptionCustomerProvidedKey; } + set { this.serverSideEncryptionCustomerProvidedKey = value; } + } + + /// + /// The MD5 of the customer encryption key specified in the ServerSideEncryptionCustomerProvidedKey property. The MD5 is + /// base 64 encoded. This field is optional, the SDK will calculate the MD5 if this is not set. + /// + public string ServerSideEncryptionCustomerProvidedKeyMD5 + { + get { return this.serverSideEncryptionCustomerProvidedKeyMD5; } + set { this.serverSideEncryptionCustomerProvidedKeyMD5 = value; } + } + + #endregion + + /// + /// Gets and sets the property ChecksumAlgorithm. + /// + /// Indicates the algorithm used to create the checksum for the object. + /// For more information, see + /// Checking object integrity in the Amazon S3 User Guide. + /// + /// + /// + /// If you provide an individual checksum, Amazon S3 will ignore any provided ChecksumAlgorithm. + /// + /// + public ChecksumAlgorithm ChecksumAlgorithm + { + get { return this.checksumAlgorithm; } + set { this.checksumAlgorithm = value; } + } + + #region CannedACL + + /// + /// Gets or sets the canned access control list (ACL) + /// for the uploaded object. + /// Please refer to + /// for + /// information on Amazon S3 canned ACLs. + /// + /// + /// The canned access control list (ACL) + /// for the uploaded object. + /// + public S3CannedACL CannedACL + { + get { return this.cannedACL; } + set { this.cannedACL = value; } + } + + /// + /// Checks if the CannedACL property is set. + /// + /// true if there is the CannedACL property is set. + internal bool IsSetCannedACL() + { + return (cannedACL != null); + } + + /// + /// Removes the canned access control list (ACL) + /// for the uploaded object. + /// + public void RemoveCannedACL() + { + this.cannedACL = null; + } + + #endregion + + #region StorageClass + + /// + /// Gets or sets the storage class for the uploaded Amazon S3 object. + /// Please refer to + /// for + /// information on S3 Storage Classes. + /// + /// + /// The storage class for the uploaded Amazon S3 object. + /// + public S3StorageClass StorageClass + { + get { return this.storageClass; } + set { this.storageClass = value; } + } + + #endregion + + /// + /// The collection of meta data for the request. + /// + public MetadataCollection Metadata + { + get + { + if (this.metadataCollection == null) + this.metadataCollection = new MetadataCollection(); + return this.metadataCollection; + } + internal set { this.metadataCollection = value; } + } + + /// + /// The tag-set for the object. + /// + public List TagSet + { + get { return this.tagset; } + set { this.tagset = value; } + } + + /// + /// Gets and sets the property ObjectLockLegalHoldStatus. + /// + /// Specifies whether a legal hold will be applied to this object. For more information + /// about S3 Object Lock, see Object + /// Lock. + /// + /// + public ObjectLockLegalHoldStatus ObjectLockLegalHoldStatus + { + get { return this.objectLockLegalHoldStatus; } + set { this.objectLockLegalHoldStatus = value; } + } + + /// + /// Gets and sets the property ObjectLockMode. + /// + /// The Object Lock mode that you want to apply to this object. 
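As a usage sketch for the Object Lock properties that now live on the shared BaseUploadRequest: the snippet below is illustrative only and not part of the patch; the bucket (which must have Object Lock enabled), key, local path, and retention period are hypothetical.

using System;
using System.Threading.Tasks;
using Amazon.S3;
using Amazon.S3.Transfer;

internal static class ObjectLockUploadExample
{
    public static async Task RunAsync(IAmazonS3 s3Client)
    {
        var request = new TransferUtilityUploadRequest
        {
            BucketName = "amzn-s3-demo-bucket",                      // hypothetical Object Lock-enabled bucket
            Key = "invoices/2025-10.pdf",                            // hypothetical key
            FilePath = @"C:\data\invoices\2025-10.pdf",              // hypothetical local path
            ObjectLockMode = ObjectLockMode.Compliance,              // WORM retention mode
            ObjectLockRetainUntilDate = DateTime.UtcNow.AddDays(90), // hypothetical retention window
            ObjectLockLegalHoldStatus = ObjectLockLegalHoldStatus.Off
        };
        await new TransferUtility(s3Client).UploadAsync(request);
    }
}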
+ /// + /// + public ObjectLockMode ObjectLockMode + { + get { return this.objectLockMode; } + set { this.objectLockMode = value; } + } + + /// + /// Gets and sets the property ObjectLockRetainUntilDate. + /// + /// The date and time when you want this object's Object Lock to expire. + /// + /// + public DateTime? ObjectLockRetainUntilDate + { + get { return this.objectLockRetainUntilDate.GetValueOrDefault(); } + set { this.objectLockRetainUntilDate = value; } + } + + // Check to see if ObjectLockRetainUntilDate property is set + internal bool IsSetObjectLockRetainUntilDate() + { + return this.objectLockRetainUntilDate.HasValue; + } + + /// + /// WARNING: Setting DisablePayloadSigning to true disables the SigV4 payload signing + /// data integrity check on this request. + /// If using SigV4, the DisablePayloadSigning flag controls if the payload should be + /// signed on a request by request basis. By default this flag is null which will use the + /// default client behavior. The default client behavior is to sign the payload. When + /// DisablePayloadSigning is true, the request will be signed with an UNSIGNED-PAYLOAD value. + /// Setting DisablePayloadSigning to true requires that the request is sent over a HTTPS + /// connection. + /// Under certain circumstances, such as uploading to S3 while using MD5 hashing, it may + /// be desirable to use UNSIGNED-PAYLOAD to decrease signing CPU usage. This flag only applies + /// to Amazon S3 PutObject and UploadPart requests. + /// MD5Stream, SigV4 payload signing, and HTTPS each provide some data integrity + /// verification. If DisableMD5Stream is true and DisablePayloadSigning is true, then the + /// possibility of data corruption is completely dependent on HTTPS being the only remaining + /// source of data integrity verification. + /// + public bool? DisablePayloadSigning + { + get { return this.disablePayloadSigning; } + set { this.disablePayloadSigning = value; } + } + + /// + /// WARNING: Setting DisableDefaultChecksumValidation to true disables the default data + /// integrity check on upload requests. + /// When true, checksum verification will not be used in upload requests. This may increase upload + /// performance under high CPU loads. Setting DisableDefaultChecksumValidation sets the deprecated property + /// DisableMD5Stream to the same value. The default value is false. + /// Checksums, SigV4 payload signing, and HTTPS each provide some data integrity + /// verification. If DisableDefaultChecksumValidation is true and DisablePayloadSigning is true, then the + /// possibility of data corruption is completely dependent on HTTPS being the only remaining + /// source of data integrity verification. + /// + public bool? DisableDefaultChecksumValidation { get; set; } + + /// + /// Gets and sets the property BucketKeyEnabled. + /// + /// Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with + /// server-side encryption using Key Management Service (KMS) keys (SSE-KMS). + /// + /// + /// + /// General purpose buckets - Setting this header to true causes Amazon S3 to use an + /// S3 Bucket Key for object encryption with SSE-KMS. + /// Also, specifying this header with a PUT action doesn't affect bucket-level settings for S3 Bucket Key. + /// + /// + /// + /// Directory buckets - S3 Bucket Keys are always enabled for GET and PUT operations in a directory bucket and can't be disabled. 
+ /// S3 Bucket Keys aren't supported, when you copy SSE-KMS encrypted objects from general purpose buckets to directory buckets, + /// from directory buckets to general purpose buckets, or between directory buckets, through + /// CopyObject, + /// UploadPartCopy, + /// the Copy operation in Batch Operations, + /// or the import jobs. + /// + /// In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object. + /// + /// + public bool? BucketKeyEnabled + { + get { return this.bucketKeyEnabled; } + set { this.bucketKeyEnabled = value; } + } + + internal bool IsSetBucketKeyEnabled() + { + return bucketKeyEnabled.HasValue; + } + + /// + /// Gets and sets the property ExpectedBucketOwner. + /// + /// The account ID of the expected bucket owner. + /// If the account ID that you provide does not match the actual owner of the bucket, + /// the request fails with the HTTP status code 403 Forbidden (access denied). + /// + /// + public string ExpectedBucketOwner + { + get { return this.expectedBucketOwner; } + set { this.expectedBucketOwner = value; } + } + + /// + /// Checks to see if ExpectedBucketOwner is set. + /// + /// true, if ExpectedBucketOwner property is set. + internal bool IsSetExpectedBucketOwner() + { + return !String.IsNullOrEmpty(this.expectedBucketOwner); + } + + /// + /// Gets the access control lists (ACLs) for this request. + /// Please refer to for information on + /// S3 Grants. + /// + public List Grants + { + get { return _grants; } + set { _grants = value; } + } + + /// + /// Gets and sets the property SSEKMSEncryptionContext. + /// + /// Specifies the Amazon Web Services KMS Encryption Context as + /// an additional encryption context to use for object encryption. + /// The value of this header is a Base64 encoded string of a UTF-8 encoded JSON, + /// which contains the encryption context as key-value pairs. + /// This value is stored as object metadata and automatically gets passed on to + /// Amazon Web Services KMS for future GetObject operations on this object. + /// + /// General purpose buckets + /// - This value must be explicitly added during CopyObject operations + /// if you want an additional encryption context for your object. + /// For more information, see Encryption context + /// in the Amazon S3 User Guide. + /// + /// Directory buckets + /// - You can optionally provide an explicit encryption context value. + /// The value must match the default encryption context + /// - the bucket Amazon Resource Name (ARN). + /// An additional encryption context value is not supported. + /// + /// + public string SSEKMSEncryptionContext + { + get { return this.sseKMSEncryptionContext; } + set { this.sseKMSEncryptionContext = value; } + } + + /// + /// Checks to see if SSEKMSEncryptionContext is set. + /// + /// true, if SSEKMSEncryptionContext property is set. + internal bool IsSetSSEKMSEncryptionContext() + { + return !String.IsNullOrEmpty(this.sseKMSEncryptionContext); + } + + /// + /// Gets and sets the property WebsiteRedirectLocation. + /// + /// If the bucket is configured as a website, + /// redirects requests for this object to another object in the + /// same bucket or to an external URL. + /// Amazon S3 stores the value of this header in the object metadata. + /// For information about object metadata, see Object Key and Metadata + /// in the Amazon S3 User Guide. 
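As a usage sketch combining the SSE-KMS settings the upload path now honors, including the BucketKeyEnabled flag documented above: the snippet below is illustrative only and not part of the patch; the bucket, key, local path, and KMS key alias are hypothetical.

using System.Threading.Tasks;
using Amazon.S3;
using Amazon.S3.Transfer;

internal static class SseKmsUploadExample
{
    public static async Task RunAsync(IAmazonS3 s3Client)
    {
        var request = new TransferUtilityUploadRequest
        {
            BucketName = "amzn-s3-demo-bucket",                             // hypothetical bucket
            Key = "data/payload.bin",                                       // hypothetical key
            FilePath = @"C:\data\payload.bin",                              // hypothetical local path
            ServerSideEncryptionMethod = ServerSideEncryptionMethod.AWSKMS,
            ServerSideEncryptionKeyManagementServiceKeyId = "alias/my-key", // hypothetical KMS key alias
            BucketKeyEnabled = true                                         // use an S3 Bucket Key to reduce KMS calls
        };
        await new TransferUtility(s3Client).UploadAsync(request);
    }
}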
+ /// + /// + /// In the following example, + /// the request header sets the redirect to an object (anotherPage.html) in the same bucket: + /// + /// + /// x-amz-website-redirect-location: /anotherPage.html + /// + /// + /// In the following example, + /// the request header sets the object redirect to another website: + /// + /// + /// x-amz-website-redirect-location: http://www.example.com/ + /// + /// + /// For more information about website hosting in Amazon S3, + /// see Hosting Websites on Amazon S3 + /// and How to Configure Website Page Redirects + /// in the Amazon S3 User Guide. + /// + /// + /// + /// This functionality is not supported for directory buckets. + /// + /// + /// + public string WebsiteRedirectLocation + { + get { return this.websiteRedirectLocation; } + set { this.websiteRedirectLocation = value; } + } + + /// + /// Checks to see if WebsiteRedirectLocation is set. + /// + /// true, if WebsiteRedirectLocation property is set. + internal bool IsSetWebsiteRedirectLocation() + { + return !String.IsNullOrEmpty(this.websiteRedirectLocation); + } + + /// + /// The collection of headers for the request. + /// + public HeadersCollection Headers + { + get + { + if (this.headersCollection == null) + this.headersCollection = new HeadersCollection(); + return this.headersCollection; + } + internal set { this.headersCollection = value; } + } } } \ No newline at end of file diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/AbortMultipartUploadsCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/AbortMultipartUploadsCommand.cs index 09ed0dba2e8f..3e3ca44376df 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/AbortMultipartUploadsCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/AbortMultipartUploadsCommand.cs @@ -41,7 +41,7 @@ internal AbortMultipartUploadsCommand(IAmazonS3 s3Client, string bucketName, Dat this._initiatedDate = initiateDate; } - private ListMultipartUploadsRequest ConstructListMultipartUploadsRequest(ListMultipartUploadsResponse listResponse) + internal ListMultipartUploadsRequest ConstructListMultipartUploadsRequest(ListMultipartUploadsResponse listResponse) { ListMultipartUploadsRequest listRequest = new ListMultipartUploadsRequest() { @@ -53,7 +53,7 @@ private ListMultipartUploadsRequest ConstructListMultipartUploadsRequest(ListMul return listRequest; } - private AbortMultipartUploadRequest ConstructAbortMultipartUploadRequest(MultipartUpload upload) + internal AbortMultipartUploadRequest ConstructAbortMultipartUploadRequest(MultipartUpload upload) { var abortRequest = new AbortMultipartUploadRequest() { diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/BaseCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/BaseCommand.cs index 5e5e83fdbae2..428758fa54e6 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/BaseCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/BaseCommand.cs @@ -37,7 +37,7 @@ public virtual object Return get { return null; } } - protected GetObjectRequest ConvertToGetObjectRequest(BaseDownloadRequest request) + internal GetObjectRequest ConvertToGetObjectRequest(BaseDownloadRequest request) { GetObjectRequest getRequest = new GetObjectRequest() { @@ -62,6 +62,21 @@ protected GetObjectRequest ConvertToGetObjectRequest(BaseDownloadRequest request getRequest.ChecksumMode = request.ChecksumMode; getRequest.RequestPayer = request.RequestPayer; + if (request.IsSetExpectedBucketOwner()) + { + getRequest.ExpectedBucketOwner = request.ExpectedBucketOwner; + } + if 
(request.IsSetIfMatch()) + { + getRequest.EtagToMatch = request.IfMatch; + } + if (request.IsSetIfNoneMatch()) + { + getRequest.EtagToNotMatch = request.IfNoneMatch; + } + + getRequest.ResponseHeaderOverrides = request.ResponseHeaderOverrides; + return getRequest; } diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadDirectoryCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadDirectoryCommand.cs index 356091b417ee..0140554ded39 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadDirectoryCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadDirectoryCommand.cs @@ -91,7 +91,7 @@ private void EnsureDirectoryExists(DirectoryInfo directory) directory.Create(); } - private TransferUtilityDownloadRequest ConstructTransferUtilityDownloadRequest(S3Object s3Object, int prefixLength) + internal TransferUtilityDownloadRequest ConstructTransferUtilityDownloadRequest(S3Object s3Object, int prefixLength) { var downloadRequest = new TransferUtilityDownloadRequest(); downloadRequest.BucketName = this._request.BucketName; @@ -102,6 +102,10 @@ private TransferUtilityDownloadRequest ConstructTransferUtilityDownloadRequest(S downloadRequest.ServerSideEncryptionCustomerProvidedKey = this._request.ServerSideEncryptionCustomerProvidedKey; downloadRequest.ServerSideEncryptionCustomerProvidedKeyMD5 = this._request.ServerSideEncryptionCustomerProvidedKeyMD5; downloadRequest.RequestPayer = this._request.RequestPayer; + downloadRequest.ExpectedBucketOwner = this._request.ExpectedBucketOwner; + downloadRequest.IfMatch = this._request.IfMatch; + downloadRequest.IfNoneMatch = this._request.IfNoneMatch; + downloadRequest.ResponseHeaderOverrides = this._request.ResponseHeaderOverrides; //Ensure the target file is a rooted within LocalDirectory. Otherwise error. 
if(!InternalSDKUtils.IsFilePathRootedWithDirectoryPath(downloadRequest.FilePath, _request.LocalDirectory)) @@ -137,11 +141,12 @@ private ListObjectsV2Request ConstructListObjectRequestV2() } listRequestV2.RequestPayer = this._request.RequestPayer; + listRequestV2.ExpectedBucketOwner = this._request.ExpectedBucketOwner; return listRequestV2; } - private ListObjectsRequest ConstructListObjectRequest() + internal ListObjectsRequest ConstructListObjectRequest() { ListObjectsRequest listRequest = new ListObjectsRequest(); listRequest.BucketName = this._request.BucketName; @@ -164,6 +169,7 @@ private ListObjectsRequest ConstructListObjectRequest() } listRequest.RequestPayer = this._request.RequestPayer; + listRequest.ExpectedBucketOwner = this._request.ExpectedBucketOwner; return listRequest; } diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartUploadCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartUploadCommand.cs index c24da9add8e0..644500df5bff 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartUploadCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartUploadCommand.cs @@ -147,12 +147,12 @@ private int CalculateConcurrentServiceRequests() return threadCount; } - private CompleteMultipartUploadRequest ConstructCompleteMultipartUploadRequest(InitiateMultipartUploadResponse initResponse) + internal CompleteMultipartUploadRequest ConstructCompleteMultipartUploadRequest(InitiateMultipartUploadResponse initResponse) { return ConstructCompleteMultipartUploadRequest(initResponse, false, null); } - private CompleteMultipartUploadRequest ConstructCompleteMultipartUploadRequest(InitiateMultipartUploadResponse initResponse, bool skipPartValidation, RequestEventHandler requestEventHandler) + internal CompleteMultipartUploadRequest ConstructCompleteMultipartUploadRequest(InitiateMultipartUploadResponse initResponse, bool skipPartValidation, RequestEventHandler requestEventHandler) { if (!skipPartValidation) { @@ -177,6 +177,7 @@ private CompleteMultipartUploadRequest ConstructCompleteMultipartUploadRequest(I ChecksumCRC64NVME = this._fileTransporterRequest.ChecksumCRC64NVME, ChecksumSHA1 = this._fileTransporterRequest.ChecksumSHA1, ChecksumSHA256 = this._fileTransporterRequest.ChecksumSHA256, + ExpectedBucketOwner = this._fileTransporterRequest.ExpectedBucketOwner, }; if(this._fileTransporterRequest.ServerSideEncryptionCustomerMethod != null @@ -210,7 +211,7 @@ private CompleteMultipartUploadRequest ConstructCompleteMultipartUploadRequest(I return compRequest; } - private UploadPartRequest ConstructUploadPartRequest(int partNumber, long filePosition, InitiateMultipartUploadResponse initiateResponse) + internal UploadPartRequest ConstructUploadPartRequest(int partNumber, long filePosition, InitiateMultipartUploadResponse initiateResponse) { UploadPartRequest uploadPartRequest = ConstructGenericUploadPartRequest(initiateResponse); @@ -241,7 +242,7 @@ private UploadPartRequest ConstructUploadPartRequest(int partNumber, long filePo return uploadPartRequest; } - private UploadPartRequest ConstructGenericUploadPartRequest(InitiateMultipartUploadResponse initiateResponse) + internal UploadPartRequest ConstructGenericUploadPartRequest(InitiateMultipartUploadResponse initiateResponse) { UploadPartRequest uploadPartRequest = new UploadPartRequest() { @@ -254,7 +255,8 @@ private UploadPartRequest ConstructGenericUploadPartRequest(InitiateMultipartUpl DisableDefaultChecksumValidation = this._fileTransporterRequest.DisableDefaultChecksumValidation, 
DisablePayloadSigning = this._fileTransporterRequest.DisablePayloadSigning, ChecksumAlgorithm = this._fileTransporterRequest.ChecksumAlgorithm, - RequestPayer = this._fileTransporterRequest.RequestPayer + RequestPayer = this._fileTransporterRequest.RequestPayer, + ExpectedBucketOwner = this._fileTransporterRequest.ExpectedBucketOwner, }; // If the InitiateMultipartUploadResponse indicates that this upload is using KMS, force SigV4 for each UploadPart request @@ -265,7 +267,7 @@ private UploadPartRequest ConstructGenericUploadPartRequest(InitiateMultipartUpl return uploadPartRequest; } - private UploadPartRequest ConstructUploadPartRequestForNonSeekableStream(Stream inputStream, int partNumber, long partSize, bool isLastPart, InitiateMultipartUploadResponse initiateResponse) + internal UploadPartRequest ConstructUploadPartRequestForNonSeekableStream(Stream inputStream, int partNumber, long partSize, bool isLastPart, InitiateMultipartUploadResponse initiateResponse) { UploadPartRequest uploadPartRequest = ConstructGenericUploadPartRequest(initiateResponse); @@ -285,12 +287,12 @@ private UploadPartRequest ConstructUploadPartRequestForNonSeekableStream(Stream return uploadPartRequest; } - private InitiateMultipartUploadRequest ConstructInitiateMultipartUploadRequest() + internal InitiateMultipartUploadRequest ConstructInitiateMultipartUploadRequest() { return this.ConstructInitiateMultipartUploadRequest(null); } - private InitiateMultipartUploadRequest ConstructInitiateMultipartUploadRequest(RequestEventHandler requestEventHandler) + internal InitiateMultipartUploadRequest ConstructInitiateMultipartUploadRequest(RequestEventHandler requestEventHandler) { var initRequest = new InitiateMultipartUploadRequest() { @@ -304,11 +306,17 @@ private InitiateMultipartUploadRequest ConstructInitiateMultipartUploadRequest(R ServerSideEncryptionCustomerMethod = this._fileTransporterRequest.ServerSideEncryptionCustomerMethod, ServerSideEncryptionCustomerProvidedKey = this._fileTransporterRequest.ServerSideEncryptionCustomerProvidedKey, ServerSideEncryptionCustomerProvidedKeyMD5 = this._fileTransporterRequest.ServerSideEncryptionCustomerProvidedKeyMD5, + ServerSideEncryptionKeyManagementServiceEncryptionContext = this._fileTransporterRequest.SSEKMSEncryptionContext, TagSet = this._fileTransporterRequest.TagSet, ChecksumAlgorithm = this._fileTransporterRequest.ChecksumAlgorithm, ObjectLockLegalHoldStatus = this._fileTransporterRequest.ObjectLockLegalHoldStatus, ObjectLockMode = this._fileTransporterRequest.ObjectLockMode, - RequestPayer = this._fileTransporterRequest.RequestPayer + RequestPayer = this._fileTransporterRequest.RequestPayer, + ExpectedBucketOwner = this._fileTransporterRequest.ExpectedBucketOwner, + Grants = this._fileTransporterRequest.Grants, + Metadata = this._fileTransporterRequest.Metadata, + WebsiteRedirectLocation = this._fileTransporterRequest.WebsiteRedirectLocation, + BucketKeyEnabled = this._fileTransporterRequest.BucketKeyEnabled, }; if (this._fileTransporterRequest.IsSetObjectLockRetainUntilDate()) diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/OpenStreamCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/OpenStreamCommand.cs index e34df962f364..57eab52d3f98 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/OpenStreamCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/OpenStreamCommand.cs @@ -41,7 +41,7 @@ internal OpenStreamCommand(IAmazonS3 s3Client, TransferUtilityOpenStreamRequest this._request = request; } - private GetObjectRequest 
ConstructRequest() + internal GetObjectRequest ConstructRequest() { if (!this._request.IsSetBucketName()) { diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/SimpleUploadCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/SimpleUploadCommand.cs index 95a15611d2f5..3f10fa35b1d0 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/SimpleUploadCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/SimpleUploadCommand.cs @@ -50,7 +50,7 @@ internal SimpleUploadCommand(IAmazonS3 s3Client, TransferUtilityConfig config, T var fileName = fileTransporterRequest.FilePath; } - private PutObjectRequest ConstructRequest() + internal PutObjectRequest ConstructRequest() { PutObjectRequest putRequest = new PutObjectRequest() { @@ -78,7 +78,12 @@ private PutObjectRequest ConstructRequest() ChecksumCRC64NVME = this._fileTransporterRequest.ChecksumCRC64NVME, ChecksumSHA1 = this._fileTransporterRequest.ChecksumSHA1, ChecksumSHA256 = this._fileTransporterRequest.ChecksumSHA256, - RequestPayer = this._fileTransporterRequest.RequestPayer + RequestPayer = this._fileTransporterRequest.RequestPayer, + BucketKeyEnabled = this._fileTransporterRequest.BucketKeyEnabled, + ExpectedBucketOwner = this._fileTransporterRequest.ExpectedBucketOwner, + Grants = this._fileTransporterRequest.Grants, + ServerSideEncryptionKeyManagementServiceEncryptionContext = this._fileTransporterRequest.SSEKMSEncryptionContext, + WebsiteRedirectLocation = this._fileTransporterRequest.WebsiteRedirectLocation, }; // Avoid setting ContentType to null, as that may clear diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/UploadDirectoryCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/UploadDirectoryCommand.cs index 816c8ef12e6f..e4be9b27aa74 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/UploadDirectoryCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/UploadDirectoryCommand.cs @@ -50,7 +50,7 @@ internal UploadDirectoryCommand(TransferUtility utility, TransferUtilityConfig c this._config = config; } - private TransferUtilityUploadRequest ConstructRequest(string basePath, string filepath, string prefix) + internal TransferUtilityUploadRequest ConstructRequest(string basePath, string filepath, string prefix) { string key = filepath.Substring(basePath.Length); key = key.Replace(@"\", "/"); @@ -79,6 +79,12 @@ private TransferUtilityUploadRequest ConstructRequest(string basePath, string fi RequestPayer = this._request.RequestPayer, DisableDefaultChecksumValidation = this._request.DisableDefaultChecksumValidation, ChecksumAlgorithm = this._request.ChecksumAlgorithm, + BucketKeyEnabled = this._request.BucketKeyEnabled, + ExpectedBucketOwner = this._request.ExpectedBucketOwner, + SSEKMSEncryptionContext = this._request.SSEKMSEncryptionContext, + WebsiteRedirectLocation = this._request.WebsiteRedirectLocation, + Headers = this._request.Headers, + Grants = this._request.Grants }; if (this._request.IsSetObjectLockRetainUntilDate()) diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartUploadCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartUploadCommand.async.cs index 34bd339dc9a3..74e5f6c874a8 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartUploadCommand.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartUploadCommand.async.cs @@ -168,17 +168,23 @@ private void Cleanup(string uploadId, List> tasks) AbortMultipartUpload(uploadId); } + internal AbortMultipartUploadRequest 
ConstructAbortMultipartUploadRequest(string uploadId) + { + return new AbortMultipartUploadRequest() + { + BucketName = this._fileTransporterRequest.BucketName, + ExpectedBucketOwner = this._fileTransporterRequest.ExpectedBucketOwner, + Key = this._fileTransporterRequest.Key, + RequestPayer = this._fileTransporterRequest.RequestPayer, + UploadId = uploadId + }; + } + private void AbortMultipartUpload(string uploadId) { try { - this._s3Client.AbortMultipartUploadAsync(new AbortMultipartUploadRequest() - { - BucketName = this._fileTransporterRequest.BucketName, - Key = this._fileTransporterRequest.Key, - RequestPayer = this._fileTransporterRequest.RequestPayer, - UploadId = uploadId - }).Wait(); + this._s3Client.AbortMultipartUploadAsync(ConstructAbortMultipartUploadRequest(uploadId)).Wait(); } catch (Exception e) { diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryRequest.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryRequest.cs index 2bffe3c249bb..b0556e92487a 100644 --- a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryRequest.cs +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryRequest.cs @@ -52,6 +52,11 @@ public class TransferUtilityDownloadDirectoryRequest private RequestPayer requestPayer; + private string expectedBucketOwner; + private string ifMatch; + private string ifNoneMatch; + private ResponseHeaderOverrides responseHeaders; + /// /// Gets or sets the name of the bucket. /// @@ -255,6 +260,108 @@ public RequestPayer RequestPayer get { return this.requestPayer; } set { this.requestPayer = value; } } + + /// + /// Gets and sets the property ExpectedBucketOwner. + /// + /// The account ID of the expected bucket owner. If the account ID that you provide does + /// not match the actual owner of the bucket, the request fails with the HTTP status code + /// 403 Forbidden (access denied). + /// + /// + public string ExpectedBucketOwner + { + get { return this.expectedBucketOwner; } + set { this.expectedBucketOwner = value; } + } + + /// + /// Checks to see if ExpectedBucketOwner is set. + /// + /// true, if ExpectedBucketOwner property is set. + internal bool IsSetExpectedBucketOwner() + { + return !String.IsNullOrEmpty(this.expectedBucketOwner); + } + + /// + /// Gets and sets the property IfMatch. + /// + /// Return the object only if its entity tag (ETag) is the same as the one specified in this header; + /// otherwise, return a 412 Precondition Failed error. + /// + /// + /// If both of the If-Match and If-Unmodified-Since headers are present in the request as follows: + /// If-Match condition evaluates to true, and; If-Unmodified-Since condition evaluates to false; + /// then, S3 returns 200 OK and the data requested. + /// + /// + /// For more information about conditional requests, see RFC 7232. + /// + /// + public string IfMatch + { + get { return this.ifMatch; } + set { this.ifMatch = value; } + } + + /// + /// Checks to see if IfMatch is set. + /// + /// true, if IfMatch property is set. + internal bool IsSetIfMatch() + { + return !String.IsNullOrEmpty(this.ifMatch); + } + + /// + /// Gets and sets the property IfNoneMatch. + /// + /// Return the object only if its entity tag (ETag) is different from the one specified in this header; + /// otherwise, return a 304 Not Modified error. 
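As a usage sketch for ExpectedBucketOwner now plumbed through directory downloads: the snippet below is illustrative only and not part of the patch; the bucket, key prefix, local directory, and account ID are hypothetical.

using System.Threading.Tasks;
using Amazon.S3;
using Amazon.S3.Transfer;

internal static class DownloadDirectoryExample
{
    public static async Task RunAsync(IAmazonS3 s3Client)
    {
        var request = new TransferUtilityDownloadDirectoryRequest
        {
            BucketName = "amzn-s3-demo-bucket",  // hypothetical bucket
            S3Directory = "logs/2025/",          // hypothetical key prefix
            LocalDirectory = @"C:\logs",         // hypothetical local directory
            ExpectedBucketOwner = "111122223333" // hypothetical account ID; a mismatch fails with 403
        };
        await new TransferUtility(s3Client).DownloadDirectoryAsync(request);
    }
}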
+ /// + /// + /// If both of the If-None-Match and If-Modified-Since headers are present in the request as follows: + /// If-None-Match condition evaluates to false, and; If-Modified-Since condition evaluates to true; + /// then, S3 returns 304 Not Modified HTTP status code. + /// + /// + /// For more information about conditional requests, see RFC 7232. + /// + /// + public string IfNoneMatch + { + get { return this.ifNoneMatch; } + set { this.ifNoneMatch = value; } + } + + /// + /// Checks to see if IfNoneMatch is set. + /// + /// true, if IfNoneMatch property is set. + internal bool IsSetIfNoneMatch() + { + return !String.IsNullOrEmpty(this.ifNoneMatch); + } + + /// + /// A set of response headers that should be returned with the object. + /// + public ResponseHeaderOverrides ResponseHeaderOverrides + { + get + { + if (this.responseHeaders == null) + { + this.responseHeaders = new ResponseHeaderOverrides(); + } + return this.responseHeaders; + } + set + { + this.responseHeaders = value; + } + } /// /// The event for DownloadedDirectoryProgressEvent notifications. All diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadDirectoryRequest.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadDirectoryRequest.cs index 5588d9ad9327..cf7be9f65437 100644 --- a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadDirectoryRequest.cs +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadDirectoryRequest.cs @@ -38,35 +38,10 @@ namespace Amazon.S3.Transfer public class TransferUtilityUploadDirectoryRequest : BaseUploadRequest { string _directory; - string _bucketname; string _searchPattern = "*"; string _keyPrefix; - private string contentType; private bool _uploadFilesConcurrently = false; SearchOption _searchOption = SearchOption.TopDirectoryOnly; - S3CannedACL _cannedACL; - S3StorageClass _storageClass; - MetadataCollection metadataCollection; - ServerSideEncryptionMethod encryption; - string serverSideEncryptionKeyManagementServiceKeyId; - private ServerSideEncryptionCustomerMethod serverSideCustomerEncryption; - private string serverSideEncryptionCustomerProvidedKey; - private string serverSideEncryptionCustomerProvidedKeyMD5; - private List tagset; - private ObjectLockLegalHoldStatus objectLockLegalHoldStatus; - private ObjectLockMode objectLockMode; - private bool disablePayloadSigning; - private DateTime? objectLockRetainUntilDate; - private ChecksumAlgorithm checksumAlgorithm; - - /// - /// Gets or sets whether the payload should be signed or not - /// - public bool DisablePayloadSigning - { - get { return this.disablePayloadSigning; } - set { this.disablePayloadSigning = value; } - } /// /// Gets or sets the directory where files are uploaded from. @@ -152,226 +127,6 @@ public SearchOption SearchOption set { this._searchOption = value; } } - - /// - /// Gets or sets the name of the bucket. - /// - /// - /// The name of the bucket. - /// - public string BucketName - { - get { return this._bucketname; } - set { this._bucketname = value; } - } - - /// - /// Checks if BucketName property is set. - /// - /// true if BucketName property is set. - internal bool IsSetBucketName() - { - return !System.String.IsNullOrEmpty(this._bucketname); - } - - - /// - /// Gets or sets the canned access control list (ACL) - /// for the uploaded objects. - /// Please refer to - /// for - /// information on Amazon S3 canned ACLs. - /// - /// - /// The canned access control list (ACL) - /// for the uploaded objects. 
- /// - public S3CannedACL CannedACL - { - get { return this._cannedACL; } - set { this._cannedACL = value; } - } - - - /// - /// Checks if the CannedACL property is set. - /// - /// true if there is the CannedACL property is set. - internal bool IsSetCannedACL() - { - return (_cannedACL != null &&_cannedACL != S3CannedACL.NoACL); - } - - /// - /// Gets or sets the content type for the uploaded Amazon S3 objects. - /// The default behavior when this field is not set is to use the file - /// extension to set the content type. If this field is set to a value it - /// will be applied to all uploaded files in the directory, overriding - /// file extension inspection. - /// - /// - /// The content type for all the uploaded Amazon S3 objects. - /// - public string ContentType - { - get { return this.contentType; } - set { this.contentType = value; } - } - - - /// - /// Gets or sets the storage class for the uploaded Amazon S3 objects. - /// Please refer to - /// for - /// information on S3 Storage Classes. - /// - /// - /// The storage class for the uploaded Amazon S3 objects. - /// - public S3StorageClass StorageClass - { - get { return this._storageClass; } - set - { - this._storageClass = value; - } - } - - - /// - /// The collection of meta data for the request. - /// - public MetadataCollection Metadata - { - get - { - if (this.metadataCollection == null) - this.metadataCollection = new MetadataCollection(); - return this.metadataCollection; - } - internal set { this.metadataCollection = value; } - } - - #region ServerSideEncryption - - /// - /// Gets or sets the ServerSideEncryptionMethod property. - /// Specifies the encryption used on the server to - /// store the content. - /// - public ServerSideEncryptionMethod ServerSideEncryptionMethod - { - get { return this.encryption; } - set { this.encryption = value; } - } - - /// - /// The id of the AWS Key Management Service key that Amazon S3 should use to encrypt and decrypt the object. - /// If a key id is not specified, the default key will be used for encryption and decryption. - /// - [AWSProperty(Sensitive=true)] - public string ServerSideEncryptionKeyManagementServiceKeyId - { - get { return this.serverSideEncryptionKeyManagementServiceKeyId; } - set { this.serverSideEncryptionKeyManagementServiceKeyId = value; } - } - - /// - /// The Server-side encryption algorithm to be used with the customer provided key. - /// - public ServerSideEncryptionCustomerMethod ServerSideEncryptionCustomerMethod - { - get { return this.serverSideCustomerEncryption; } - set { this.serverSideCustomerEncryption = value; } - } - - /// - /// Checks if ServerSideEncryptionKeyManagementServiceKeyId property is set. - /// - /// true if ServerSideEncryptionKeyManagementServiceKeyId property is set. - internal bool IsSetServerSideEncryptionKeyManagementServiceKeyId() - { - return !System.String.IsNullOrEmpty(this.serverSideEncryptionKeyManagementServiceKeyId); - } - - /// - /// The base64-encoded encryption key for Amazon S3 to use to encrypt the object - /// - /// Using the encryption key you provide as part of your request Amazon S3 manages both the encryption, as it writes - /// to disks, and decryption, when you access your objects. Therefore, you don't need to maintain any data encryption code. The only - /// thing you do is manage the encryption keys you provide. - /// - /// - /// When you retrieve an object, you must provide the same encryption key as part of your request. 
Amazon S3 first verifies - /// the encryption key you provided matches, and then decrypts the object before returning the object data to you. - /// - /// - /// Important: Amazon S3 does not store the encryption key you provide. - /// - /// - [AWSProperty(Sensitive=true)] - public string ServerSideEncryptionCustomerProvidedKey - { - get { return this.serverSideEncryptionCustomerProvidedKey; } - set { this.serverSideEncryptionCustomerProvidedKey = value; } - } - - /// - /// The MD5 of the customer encryption key specified in the ServerSideEncryptionCustomerProvidedKey property. The MD5 is - /// base 64 encoded. This field is optional, the SDK will calculate the MD5 if this is not set. - /// - public string ServerSideEncryptionCustomerProvidedKeyMD5 - { - get { return this.serverSideEncryptionCustomerProvidedKeyMD5; } - set { this.serverSideEncryptionCustomerProvidedKeyMD5 = value; } - } - - #endregion - - /// - /// Gets and sets the property ObjectLockLegalHoldStatus. - /// - /// Specifies whether a legal hold will be applied to this object. For more information - /// about S3 Object Lock, see Object - /// Lock. - /// - /// - public ObjectLockLegalHoldStatus ObjectLockLegalHoldStatus - { - get { return this.objectLockLegalHoldStatus; } - set { this.objectLockLegalHoldStatus = value; } - } - - /// - /// Gets and sets the property ObjectLockMode. - /// - /// The Object Lock mode that you want to apply to this object. - /// - /// - public ObjectLockMode ObjectLockMode - { - get { return this.objectLockMode; } - set { this.objectLockMode = value; } - } - - /// - /// Gets and sets the property ObjectLockRetainUntilDate. - /// - /// The date and time when you want this object's Object Lock to expire. - /// - /// - public DateTime ObjectLockRetainUntilDate - { - get { return this.objectLockRetainUntilDate.GetValueOrDefault(); } - set { this.objectLockRetainUntilDate = value; } - } - - // Check to see if ObjectLockRetainUntilDate property is set - internal bool IsSetObjectLockRetainUntilDate() - { - return this.objectLockRetainUntilDate.HasValue; - } - /// /// Gets or sets the UploadFilesConcurrently property. /// Specifies if multiple files will be uploaded concurrently. @@ -444,41 +199,6 @@ internal void RaiseUploadDirectoryFileRequestEvent(TransferUtilityUploadRequest targetEvent(this, args); } } - - /// - /// Tags that will be applied to all objects in the diretory. - /// - public List TagSet - { - get { return this.tagset; } - set { this.tagset = value; } - } - - /// - /// WARNING: Setting DisableDefaultChecksumValidation to true disables the default data - /// integrity check on upload requests. - /// When true, checksum verification will not be used in upload requests. This may increase upload - /// performance under high CPU loads. The default value is false. - /// Checksums, SigV4 payload signing, and HTTPS each provide some data integrity - /// verification. If DisableDefaultChecksumValidation is true and DisablePayloadSigning is true, then the - /// possibility of data corruption is completely dependent on HTTPS being the only remaining - /// source of data integrity verification. - /// - public bool? DisableDefaultChecksumValidation { get; set; } - - /// - /// Gets and sets the property ChecksumAlgorithm. - /// - /// Indicates the algorithm used to create the checksum for each object in the provided directory. - /// For more information, see - /// Checking object integrity in the Amazon S3 User Guide. 
- /// - /// - public ChecksumAlgorithm ChecksumAlgorithm - { - get { return this.checksumAlgorithm; } - set { this.checksumAlgorithm = value; } - } } /// diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadRequest.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadRequest.cs index 868fcf697dd8..b21ab2ae7602 100644 --- a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadRequest.cs +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadRequest.cs @@ -38,20 +38,10 @@ namespace Amazon.S3.Transfer /// public partial class TransferUtilityUploadRequest : BaseUploadRequest { - private string bucketName; private string key; - private S3CannedACL cannedACL; - private string contentType; - private S3StorageClass storageClass; private long? partSize; private bool autoCloseStream = true; private bool autoResetStreamPosition = true; - private ServerSideEncryptionMethod encryption; - private ServerSideEncryptionCustomerMethod serverSideCustomerEncryption; - private string serverSideEncryptionCustomerProvidedKey; - private string serverSideEncryptionCustomerProvidedKeyMD5; - private string serverSideEncryptionKeyManagementServiceKeyId; - private ChecksumAlgorithm checksumAlgorithm; private string _checksumCRC32; private string _checksumCRC32C; private string _checksumCRC64NVME; @@ -61,41 +51,7 @@ public partial class TransferUtilityUploadRequest : BaseUploadRequest private string _ifMatch; private long? _mpuObjectSize; - private HeadersCollection headersCollection = new HeadersCollection(); - private MetadataCollection metadataCollection = new MetadataCollection(); - - private List tagset; - private Stream inputStream; - private ObjectLockLegalHoldStatus objectLockLegalHoldStatus; - private ObjectLockMode objectLockMode; - private DateTime? objectLockRetainUntilDate; - - #region BucketName - - /// - /// Gets or sets the name of the bucket. - /// - /// - /// The name of the bucket. - /// - public string BucketName - { - get { return this.bucketName; } - set { this.bucketName = value; } - } - - - /// - /// Checks if BucketName property is set. - /// - /// true if BucketName property is set. - internal bool IsSetBucketName() - { - return !System.String.IsNullOrEmpty(this.bucketName); - } - - #endregion #region Key /// @@ -122,166 +78,6 @@ internal bool IsSetKey() #endregion - #region CannedACL - - /// - /// Gets or sets the canned access control list (ACL) - /// for the uploaded object. - /// Please refer to - /// for - /// information on Amazon S3 canned ACLs. - /// - /// - /// The canned access control list (ACL) - /// for the uploaded object. - /// - public S3CannedACL CannedACL - { - get { return this.cannedACL; } - set { this.cannedACL = value; } - } - - /// - /// Checks if the CannedACL property is set. - /// - /// true if there is the CannedACL property is set. - internal bool IsSetCannedACL() - { - return (cannedACL != null); - } - - /// - /// Removes the cannned access control list (ACL) - /// for the uploaded object. - /// - public void RemoveCannedACL() - { - this.cannedACL = null; - } - - #endregion - - #region ContentType - /// - /// Gets or sets the content type of the uploaded Amazon S3 object. - /// - /// - /// The content type of the uploaded Amazon S3 object. - /// - public string ContentType - { - get { return this.contentType; } - set { this.contentType = value; } - } - - - /// - /// Checks if ContentType property is set. - /// - /// true if ContentType property is set. 
- internal bool IsSetContentType() - { - return !System.String.IsNullOrEmpty(this.contentType); - } - - #endregion - - #region StorageClass - - /// - /// Gets or sets the storage class for the uploaded Amazon S3 object. - /// Please refer to - /// for - /// information on S3 Storage Classes. - /// - /// - /// The storage class for the uploaded Amazon S3 object. - /// - public S3StorageClass StorageClass - { - get { return this.storageClass; } - set { this.storageClass = value; } - } - - #endregion - - #region ServerSideEncryption - - /// - /// Gets and sets the ServerSideEncryptionMethod property. - /// Specifies the encryption used on the server to - /// store the content. - /// - public ServerSideEncryptionMethod ServerSideEncryptionMethod - { - get { return this.encryption; } - set { this.encryption = value; } - } - - /// - /// The Server-side encryption algorithm to be used with the customer provided key. - /// - /// - public ServerSideEncryptionCustomerMethod ServerSideEncryptionCustomerMethod - { - get { return this.serverSideCustomerEncryption; } - set { this.serverSideCustomerEncryption = value; } - } - - /// - /// The id of the AWS Key Management Service key that Amazon S3 should use to encrypt and decrypt the object. - /// If a key id is not specified, the default key will be used for encryption and decryption. - /// - [AWSProperty(Sensitive=true)] - public string ServerSideEncryptionKeyManagementServiceKeyId - { - get { return this.serverSideEncryptionKeyManagementServiceKeyId; } - set { this.serverSideEncryptionKeyManagementServiceKeyId = value; } - } - - /// - /// Checks if ServerSideEncryptionKeyManagementServiceKeyId property is set. - /// - /// true if ServerSideEncryptionKeyManagementServiceKeyId property is set. - internal bool IsSetServerSideEncryptionKeyManagementServiceKeyId() - { - return !System.String.IsNullOrEmpty(this.serverSideEncryptionKeyManagementServiceKeyId); - } - - /// - /// The Base64 encoded encryption key for Amazon S3 to use to encrypt the object - /// - /// Using the encryption key you provide as part of your request Amazon S3 manages both the encryption, as it writes - /// to disks, and decryption, when you access your objects. Therefore, you don't need to maintain any data encryption code. The only - /// thing you do is manage the encryption keys you provide. - /// - /// - /// When you retrieve an object, you must provide the same encryption key as part of your request. Amazon S3 first verifies - /// the encryption key you provided matches, and then decrypts the object before returning the object data to you. - /// - /// - /// Important: Amazon S3 does not store the encryption key you provide. - /// - /// - [AWSProperty(Sensitive=true)] - public string ServerSideEncryptionCustomerProvidedKey - { - get { return this.serverSideEncryptionCustomerProvidedKey; } - set { this.serverSideEncryptionCustomerProvidedKey = value; } - } - - /// - /// The MD5 of the customer encryption key specified in the ServerSideEncryptionCustomerProvidedKey property. The MD5 is - /// base 64 encoded. This field is optional, the SDK will calculate the MD5 if this is not set. - /// - public string ServerSideEncryptionCustomerProvidedKeyMD5 - { - get { return this.serverSideEncryptionCustomerProvidedKeyMD5; } - set { this.serverSideEncryptionCustomerProvidedKeyMD5 = value; } - } - - #endregion - /// /// Input stream for the request; content for the request will be read from the stream. 
/// @@ -345,43 +141,6 @@ internal bool IsSetPartSize() return this.partSize.HasValue; } - /// - /// The collection of headers for the request. - /// - public HeadersCollection Headers - { - get - { - if (this.headersCollection == null) - this.headersCollection = new HeadersCollection(); - return this.headersCollection; - } - internal set { this.headersCollection = value; } - } - - /// - /// The collection of meta data for the request. - /// - public MetadataCollection Metadata - { - get - { - if (this.metadataCollection == null) - this.metadataCollection = new MetadataCollection(); - return this.metadataCollection; - } - internal set { this.metadataCollection = value; } - } - - /// - /// The tag-set for the object. - /// - public List TagSet - { - get { return this.tagset; } - set { this.tagset = value; } - } - /// /// The event for UploadProgressEvent notifications. All /// subscribers will be notified when a new progress @@ -502,100 +261,6 @@ public TransferUtilityUploadRequest WithAutoCloseStream(bool autoCloseStream) } #endregion - /// - /// WARNING: Setting DisableDefaultChecksumValidation to true disables the default data - /// integrity check on upload requests. - /// When true, checksum verification will not be used in upload requests. This may increase upload - /// performance under high CPU loads. Setting DisableDefaultChecksumValidation sets the deprecated property - /// DisableMD5Stream to the same value. The default value is false. - /// Checksums, SigV4 payload signing, and HTTPS each provide some data integrity - /// verification. If DisableDefaultChecksumValidation is true and DisablePayloadSigning is true, then the - /// possibility of data corruption is completely dependent on HTTPS being the only remaining - /// source of data integrity verification. - /// - public bool? DisableDefaultChecksumValidation { get; set; } - - /// - /// WARNING: Setting DisablePayloadSigning to true disables the SigV4 payload signing - /// data integrity check on this request. - /// If using SigV4, the DisablePayloadSigning flag controls if the payload should be - /// signed on a request by request basis. By default this flag is null which will use the - /// default client behavior. The default client behavior is to sign the payload. When - /// DisablePayloadSigning is true, the request will be signed with an UNSIGNED-PAYLOAD value. - /// Setting DisablePayloadSigning to true requires that the request is sent over a HTTPS - /// connection. - /// Under certain circumstances, such as uploading to S3 while using MD5 hashing, it may - /// be desireable to use UNSIGNED-PAYLOAD to decrease signing CPU usage. This flag only applies - /// to Amazon S3 PutObject and UploadPart requests. - /// MD5Stream, SigV4 payload signing, and HTTPS each provide some data integrity - /// verification. If DisableMD5Stream is true and DisablePayloadSigning is true, then the - /// possibility of data corruption is completely dependant on HTTPS being the only remaining - /// source of data integrity verification. - /// - public bool? DisablePayloadSigning { get; set; } - - /// - /// Gets and sets the property ObjectLockLegalHoldStatus. - /// - /// Specifies whether a legal hold will be applied to this object. For more information - /// about S3 Object Lock, see Object - /// Lock. - /// - /// - public ObjectLockLegalHoldStatus ObjectLockLegalHoldStatus - { - get { return this.objectLockLegalHoldStatus; } - set { this.objectLockLegalHoldStatus = value; } - } - - /// - /// Gets and sets the property ObjectLockMode. 
- /// - /// The Object Lock mode that you want to apply to this object. - /// - /// - public ObjectLockMode ObjectLockMode - { - get { return this.objectLockMode; } - set { this.objectLockMode = value; } - } - - /// - /// Gets and sets the property ObjectLockRetainUntilDate. - /// - /// The date and time when you want this object's Object Lock to expire. - /// - /// - public DateTime ObjectLockRetainUntilDate - { - get { return this.objectLockRetainUntilDate.GetValueOrDefault(); } - set { this.objectLockRetainUntilDate = value; } - } - - // Check to see if ObjectLockRetainUntilDate property is set - internal bool IsSetObjectLockRetainUntilDate() - { - return this.objectLockRetainUntilDate.HasValue; - } - - /// - /// Gets and sets the property ChecksumAlgorithm. - /// - /// Indicates the algorithm used to create the checksum for the object. - /// For more information, see - /// Checking object integrity in the Amazon S3 User Guide. - /// - /// - /// - /// If you provide an individual checksum, Amazon S3 will ignore any provided ChecksumAlgorithm. - /// - /// - public ChecksumAlgorithm ChecksumAlgorithm - { - get { return this.checksumAlgorithm; } - set { this.checksumAlgorithm = value; } - } - /// /// Gets and sets the property ChecksumCRC32. /// diff --git a/sdk/test/Services/S3/UnitTests/Custom/EmbeddedResource/property-aliases.json b/sdk/test/Services/S3/UnitTests/Custom/EmbeddedResource/property-aliases.json index 245790b5fdbe..e9de0a44b4fe 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/EmbeddedResource/property-aliases.json +++ b/sdk/test/Services/S3/UnitTests/Custom/EmbeddedResource/property-aliases.json @@ -1,10 +1,117 @@ { "PropertyAliases": { - "SSECustomerAlgorithm": "ServerSideEncryptionCustomerMethod", - "SSECustomerKeyMD5": "ServerSideEncryptionCustomerProvidedKeyMD5", - "SSEKMSKeyId": "ServerSideEncryptionKeyManagementServiceKeyId", - "ServerSideEncryption": "ServerSideEncryptionMethod", - "SSEKMSEncryptionContext": "ServerSideEncryptionKeyManagementServiceEncryptionContext", - "Restore": "RestoreExpiration" + "PutObjectResponse": { + "ServerSideEncryption": "ServerSideEncryptionMethod", + "SSECustomerAlgorithm": "ServerSideEncryptionCustomerMethod", + "SSECustomerKeyMD5": "ServerSideEncryptionCustomerProvidedKeyMD5", + "SSEKMSEncryptionContext": "ServerSideEncryptionKeyManagementServiceEncryptionContext", + "SSEKMSKeyId": "ServerSideEncryptionKeyManagementServiceKeyId" + }, + "TransferUtilityUploadResponse": { + "ServerSideEncryption": "ServerSideEncryptionMethod", + "SSECustomerAlgorithm": "ServerSideEncryptionCustomerMethod", + "SSECustomerKeyMD5": "ServerSideEncryptionCustomerProvidedKeyMD5", + "SSEKMSEncryptionContext": "ServerSideEncryptionKeyManagementServiceEncryptionContext", + "SSEKMSKeyId": "ServerSideEncryptionKeyManagementServiceKeyId" + }, + "PutObjectRequest": { + "SSECustomerAlgorithm": "ServerSideEncryptionCustomerMethod", + "SSECustomerKeyMD5": "ServerSideEncryptionCustomerProvidedKeyMD5", + "SSEKMSKeyId": "ServerSideEncryptionKeyManagementServiceKeyId", + "ServerSideEncryption": "ServerSideEncryptionMethod", + "SSEKMSEncryptionContext": "ServerSideEncryptionKeyManagementServiceEncryptionContext", + "ACL": "CannedACL", + "Bucket": "BucketName", + "SSECustomerKey": "ServerSideEncryptionCustomerProvidedKey", + "Tagging": "TagSet", + "GrantFullControl": "Grants", + "GrantRead": "Grants", + "GrantReadACP": "Grants", + "GrantWriteACP": "Grants" + }, + "GetObjectRequest": { + "Bucket": "BucketName", + "SSECustomerKey": "ServerSideEncryptionCustomerProvidedKey", 
+ "SSECustomerAlgorithm": "ServerSideEncryptionCustomerMethod", + "SSECustomerKeyMD5": "ServerSideEncryptionCustomerProvidedKeyMD5", + "ResponseCacheControl": "CacheControl", + "ResponseContentDisposition": "ContentDisposition", + "ResponseContentEncoding": "ContentEncoding", + "ResponseContentLanguage": "ContentLanguage", + "ResponseContentType": "ContentType", + "IfMatch": "EtagToMatch", + "IfNoneMatch": "EtagToNotMatch", + "IfModifiedSince": "ModifiedSinceDate", + "IfUnmodifiedSince": "UnmodifiedSinceDate" + }, + "TransferUtilityDownloadRequest": { + "Bucket": "BucketName", + "IfModifiedSince": "ModifiedSinceDate", + "IfUnmodifiedSince": "UnmodifiedSinceDate", + "ResponseCacheControl": "CacheControl", + "ResponseContentDisposition": "ContentDisposition", + "ResponseContentEncoding": "ContentEncoding", + "ResponseContentLanguage": "ContentLanguage", + "ResponseContentType": "ContentType", + "ResponseExpires": "Expires", + "SSECustomerKey": "ServerSideEncryptionCustomerProvidedKey", + "SSECustomerAlgorithm": "ServerSideEncryptionCustomerMethod", + "SSECustomerKeyMD5": "ServerSideEncryptionCustomerProvidedKeyMD5" + }, + "TransferUtilityUploadRequest": { + "ACL": "CannedACL", + "Bucket": "BucketName", + "GrantFullControl": "Grants", + "GrantRead": "Grants", + "GrantReadACP": "Grants", + "GrantWriteACP": "Grants", + "ServerSideEncryption": "ServerSideEncryptionMethod", + "SSECustomerAlgorithm": "ServerSideEncryptionCustomerMethod", + "SSECustomerKey": "ServerSideEncryptionCustomerProvidedKey", + "SSECustomerKeyMD5": "ServerSideEncryptionCustomerProvidedKeyMD5", + "SSEKMSKeyId": "ServerSideEncryptionKeyManagementServiceKeyId", + "Tagging": "TagSet" + }, + "TransferUtilityUploadDirectoryRequest": { + "ACL": "CannedACL", + "Bucket": "BucketName", + "GrantFullControl": "Grants", + "GrantRead": "Grants", + "GrantReadACP": "Grants", + "GrantWriteACP": "Grants", + "ServerSideEncryption": "ServerSideEncryptionMethod", + "SSECustomerAlgorithm": "ServerSideEncryptionCustomerMethod", + "SSECustomerKey": "ServerSideEncryptionCustomerProvidedKey", + "SSECustomerKeyMD5": "ServerSideEncryptionCustomerProvidedKeyMD5", + "SSEKMSKeyId": "ServerSideEncryptionKeyManagementServiceKeyId", + "Tagging": "TagSet" + }, + "InitiateMultipartUploadRequest": { + "ACL": "CannedACL", + "Bucket": "BucketName", + "GrantFullControl": "Grants", + "GrantRead": "Grants", + "GrantReadACP": "Grants", + "GrantWriteACP": "Grants", + "ServerSideEncryption": "ServerSideEncryptionMethod", + "SSECustomerAlgorithm": "ServerSideEncryptionCustomerMethod", + "SSECustomerKey": "ServerSideEncryptionCustomerProvidedKey", + "SSECustomerKeyMD5": "ServerSideEncryptionCustomerProvidedKeyMD5", + "SSEKMSKeyId": "ServerSideEncryptionKeyManagementServiceKeyId", + "Tagging": "TagSet", + "SSEKMSEncryptionContext": "ServerSideEncryptionKeyManagementServiceEncryptionContext" + }, + "UploadPartRequest": { + "Bucket": "BucketName", + "SSECustomerAlgorithm": "ServerSideEncryptionCustomerMethod", + "SSECustomerKey": "ServerSideEncryptionCustomerProvidedKey", + "SSECustomerKeyMD5": "ServerSideEncryptionCustomerProvidedKeyMD5" + }, + "CompleteMultipartUploadRequest": { + "Bucket": "BucketName" + }, + "AbortMultipartUploadRequest": { + "Bucket": "BucketName" + } } -} +} \ No newline at end of file diff --git a/sdk/test/Services/S3/UnitTests/Custom/ResponseMapperTests.cs b/sdk/test/Services/S3/UnitTests/Custom/ResponseMapperTests.cs index ce4960e85985..b9a89d63ba70 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/ResponseMapperTests.cs +++ 
b/sdk/test/Services/S3/UnitTests/Custom/ResponseMapperTests.cs @@ -34,7 +34,8 @@ public class ResponseMapperTests { private static JsonDocument _mappingJson; private static JsonDocument _propertyAliasesJson; - private static Dictionary _propertyAliases; + private static Dictionary> _propertyAliases; + private static List _s3Grants; [ClassInitialize] public static void ClassInitialize(TestContext context) @@ -65,17 +66,22 @@ public static void ClassInitialize(TestContext context) _propertyAliasesJson = JsonDocument.Parse(aliasContent); // Convert to dictionary for fast lookup - _propertyAliases = new Dictionary(); - var aliasesElement = _propertyAliasesJson.RootElement.GetProperty("PropertyAliases"); - foreach (var alias in aliasesElement.EnumerateObject()) + _propertyAliases = new Dictionary>(); + var objectElement = _propertyAliasesJson.RootElement.GetProperty("PropertyAliases"); + foreach (var objectName in objectElement.EnumerateObject()) { - _propertyAliases[alias.Name] = alias.Value.GetString(); + var aliases = new Dictionary(); + foreach (var alias in objectName.Value.EnumerateObject()) + { + aliases[alias.Name] = alias.Value.GetString(); + } + _propertyAliases[objectName.Name] = aliases; } } } else { - _propertyAliases = new Dictionary(); + _propertyAliases = new Dictionary>(); } } } @@ -128,68 +134,304 @@ public static void ClassCleanup() [TestMethod] [TestCategory("S3")] public void MapPutObjectResponse_AllMappedProperties_WorkCorrectly() + { + ValidateMappingTransferUtilityAndSdkRequests( + new[] { "Conversion", "PutObjectResponse", "UploadResponse" }, + (sourceResponse) => + { + return ResponseMapper.MapPutObjectResponse(sourceResponse); + }, + usesHeadersCollection: false, + (sourceResponse) => + { + sourceResponse.HttpStatusCode = HttpStatusCode.OK; + sourceResponse.ContentLength = 1024; + }, + (sourceResponse, targetResponse) => + { + Assert.AreEqual(sourceResponse.HttpStatusCode, targetResponse.HttpStatusCode, "HttpStatusCode should match"); + Assert.AreEqual(sourceResponse.ContentLength, targetResponse.ContentLength, "ContentLength should match"); + }); + } + + [TestMethod] + [TestCategory("S3")] + public void MapUploadRequest_PutObjectRequest_AllMappedProperties_WorkCorrectly() + { + ValidateMappingTransferUtilityAndSdkRequests( + new[] { "Conversion", "UploadRequest", "PutObjectRequest" }, + (sourceRequest) => + { + var simpleUploadCommand = new SimpleUploadCommand(null, null, sourceRequest); + return simpleUploadCommand.ConstructRequest(); + }, + usesHeadersCollection: false); + } + + [TestMethod] + [TestCategory("S3")] + public void MapUploadRequest_CreateMultipartRequest_AllMappedProperties_WorkCorrectly() + { + ValidateMappingTransferUtilityAndSdkRequests( + new[] { "Conversion", "UploadRequest", "CreateMultipartRequest" }, + (sourceRequest) => + { + var multipartUploadCommand = new MultipartUploadCommand(null, null, sourceRequest); + return multipartUploadCommand.ConstructInitiateMultipartUploadRequest(); + }, + usesHeadersCollection: true, + (sourceRequest) => + { + sourceRequest.InputStream = new MemoryStream(1024); + }); + } + + [TestMethod] + [TestCategory("S3")] + public void MapUploadRequest_UploadPartRequest_AllMappedProperties_WorkCorrectly() + { + ValidateMappingTransferUtilityAndSdkRequests( + new[] { "Conversion", "UploadRequest", "UploadPartRequest" }, + (sourceRequest) => + { + var multipartUploadCommand = new MultipartUploadCommand(null, null, sourceRequest); + + var initiateResponse = new InitiateMultipartUploadResponse + { + UploadId = 
"test-upload-id" + }; + + return multipartUploadCommand.ConstructUploadPartRequest(1, 1024, initiateResponse); + }, + usesHeadersCollection: false, + (sourceRequest) => + { + sourceRequest.InputStream = new MemoryStream(1024); + }); + } + + [TestMethod] + [TestCategory("S3")] + public void MapUploadRequest_CompleteMultipartRequest_AllMappedProperties_WorkCorrectly() + { + ValidateMappingTransferUtilityAndSdkRequests( + new[] { "Conversion", "UploadRequest", "CompleteMultipartRequest" }, + (sourceRequest) => + { + var multipartUploadCommand = new MultipartUploadCommand(null, null, sourceRequest); + + var initiateResponse = new InitiateMultipartUploadResponse + { + UploadId = "test-upload-id", + ChecksumType = ChecksumType.FULL_OBJECT + }; + + return multipartUploadCommand.ConstructCompleteMultipartUploadRequest(initiateResponse); + }, + usesHeadersCollection: false, + (sourceRequest) => + { + sourceRequest.InputStream = new MemoryStream(1024); + sourceRequest.ServerSideEncryptionCustomerMethod = ServerSideEncryptionCustomerMethod.AES256; + }); + } + + [TestMethod] + [TestCategory("S3")] + public void MapUploadRequest_AbortMultipartRequest_AllMappedProperties_WorkCorrectly() + { + ValidateMappingTransferUtilityAndSdkRequests( + new[] { "Conversion", "UploadRequest", "AbortMultipartRequest" }, + (sourceRequest) => + { + var multipartUploadCommand = new MultipartUploadCommand(null, null, sourceRequest); + + return multipartUploadCommand.ConstructAbortMultipartUploadRequest("test-upload-id"); + }, + usesHeadersCollection: false, + (sourceRequest) => + { + sourceRequest.InputStream = new MemoryStream(1024); + sourceRequest.ServerSideEncryptionCustomerMethod = ServerSideEncryptionCustomerMethod.AES256; + }); + } + + [TestMethod] + [TestCategory("S3")] + public void MapPutObjectResponse_NullValues_HandledCorrectly() + { + // Test null handling scenarios + var testCases = new[] + { + // Test null Expiration + new PutObjectResponse { Expiration = null }, + + // Test null enum conversions + new PutObjectResponse { ChecksumType = null, RequestCharged = null, ServerSideEncryptionMethod = null } + }; + + foreach (var testCase in testCases) + { + var mapped = ResponseMapper.MapPutObjectResponse(testCase); + Assert.IsNotNull(mapped, "Response should always be mappable"); + + // Test null handling + if (testCase.Expiration == null) + { + Assert.IsNull(mapped.Expiration, "Null Expiration should map to null"); + } + } + } + + private void ValidateMappingTransferUtilityAndSdkRequests( + string[] mappingPath, + Func fetchTargetRequest, + bool usesHeadersCollection = false, + Action requestHook = null, + Action additionalValidations = null) { // Get the expected mappings from JSON - var putObjectMappings = _mappingJson.RootElement - .GetProperty("Conversion") - .GetProperty("PutObjectResponse") - .GetProperty("UploadResponse") + JsonElement mappingElement = _mappingJson.RootElement; + + foreach (var path in mappingPath) + { + mappingElement = mappingElement.GetProperty(path); + } + + // Get the expected mappings from JSON + var requestMappings = mappingElement .EnumerateArray() .Select(prop => prop.GetString()) .ToList(); // Create source object with dynamically generated test data - var sourceResponse = new PutObjectResponse(); - var sourceType = typeof(PutObjectResponse); + var sourceRequest = Activator.CreateInstance(); + var sourceType = typeof(TSourceRequest); var testDataValues = new Dictionary(); // Generate test data for each mapped property - foreach (var propertyName in putObjectMappings) + foreach 
(var propertyName in requestMappings) { // Resolve alias to actual property name - var resolvedPropertyName = ResolvePropertyName(propertyName); + var resolvedPropertyName = ResolvePropertyName(propertyName, sourceType.Name); var sourceProperty = sourceType.GetProperty(resolvedPropertyName); if (sourceProperty?.CanWrite == true) { var testValue = GenerateTestValue(sourceProperty.PropertyType, propertyName); - sourceProperty.SetValue(sourceResponse, testValue); + sourceProperty.SetValue(sourceRequest, testValue); testDataValues[propertyName] = testValue; } } - // Add inherited properties for comprehensive testing - sourceResponse.HttpStatusCode = HttpStatusCode.OK; - sourceResponse.ContentLength = 1024; + requestHook?.Invoke(sourceRequest); // Map the response - var mappedResponse = ResponseMapper.MapPutObjectResponse(sourceResponse); - Assert.IsNotNull(mappedResponse, "Mapped response should not be null"); + var mappedRequest = fetchTargetRequest(sourceRequest); + Assert.IsNotNull(mappedRequest, "Mapped request should not be null"); // Verify all mapped properties using reflection - var targetType = typeof(TransferUtilityUploadResponse); + var targetType = typeof(TTargetRequest); var failedAssertions = new List(); - foreach (var propertyName in putObjectMappings) + foreach (var propertyName in requestMappings) { // Resolve alias to actual property name for reflection lookups - var resolvedPropertyName = ResolvePropertyName(propertyName); - var sourceProperty = sourceType.GetProperty(resolvedPropertyName); - var targetProperty = targetType.GetProperty(resolvedPropertyName); + var resolvedSourcePropertyName = ResolvePropertyName(propertyName, sourceType.Name); + var resolvedTargetPropertyName = ResolvePropertyName(propertyName, targetType.Name); + var sourceProperty = sourceType.GetProperty(resolvedSourcePropertyName); + var targetProperty = targetType.GetProperty(resolvedTargetPropertyName); - if (sourceProperty == null) + object sourceValue = null; + + if (sourceProperty != null) + { + // Property found directly on source type + sourceValue = sourceProperty.GetValue(sourceRequest); + } + else { - failedAssertions.Add($"Source property '{propertyName}' (resolved to: {resolvedPropertyName}) not found in PutObjectResponse"); - continue; + if (!usesHeadersCollection) + { + failedAssertions.Add($"Source property '{propertyName}' (resolved to: {resolvedSourcePropertyName}) not found in {sourceType.Name}"); + continue; + } + + // Check if source type has a Headers property of type HeadersCollection + var sourceHeadersProperty = sourceType.GetProperty("Headers"); + if (sourceHeadersProperty != null && typeof(HeadersCollection).IsAssignableFrom(sourceHeadersProperty.PropertyType)) + { + var sourceHeadersCollection = sourceHeadersProperty.GetValue(sourceRequest) as HeadersCollection; + if (sourceHeadersCollection != null) + { + var sourceHeadersCollectionProperty = typeof(HeadersCollection).GetProperty(resolvedSourcePropertyName); + if (sourceHeadersCollectionProperty != null) + { + sourceValue = sourceHeadersCollectionProperty.GetValue(sourceHeadersCollection); + } + else + { + failedAssertions.Add($"Source property '{propertyName}' (resolved to: {resolvedSourcePropertyName}) not found in {sourceType.Name} or HeadersCollection"); + continue; + } + } + else + { + failedAssertions.Add($"Source Headers collection is null in {sourceType.Name}"); + continue; + } + } + else + { + failedAssertions.Add($"Source property '{propertyName}' (resolved to: {resolvedSourcePropertyName}) not found in 
{sourceType.Name}"); + continue; + } } - if (targetProperty == null) + object targetValue = null; + + if (targetProperty != null) { - failedAssertions.Add($"Target property '{propertyName}' (resolved to: {resolvedPropertyName}) not found in TransferUtilityUploadResponse"); - continue; + // Property found directly on target type + targetValue = targetProperty.GetValue(mappedRequest); } + else + { + if (!usesHeadersCollection) + { + failedAssertions.Add($"Target property '{propertyName}' (resolved to: {resolvedTargetPropertyName}) not found in {targetType.Name}"); + continue; + } - var sourceValue = sourceProperty.GetValue(sourceResponse); - var targetValue = targetProperty.GetValue(mappedResponse); + // Check if target type has a Headers property of type HeadersCollection + var headersProperty = targetType.GetProperty("Headers"); + if (headersProperty != null && typeof(HeadersCollection).IsAssignableFrom(headersProperty.PropertyType)) + { + var headersCollection = headersProperty.GetValue(mappedRequest) as HeadersCollection; + if (headersCollection != null) + { + var headersCollectionProperty = typeof(HeadersCollection).GetProperty(resolvedTargetPropertyName); + if (headersCollectionProperty != null) + { + targetValue = headersCollectionProperty.GetValue(headersCollection); + } + else + { + failedAssertions.Add($"Target property '{propertyName}' (resolved to: {resolvedTargetPropertyName}) not found in {targetType.Name} or HeadersCollection"); + continue; + } + } + else + { + failedAssertions.Add($"Headers collection is null in {targetType.Name}"); + continue; + } + } + else + { + failedAssertions.Add($"Target property '{propertyName}' (resolved to: {resolvedTargetPropertyName}) not found in {targetType.Name}"); + continue; + } + } // Special handling for complex object comparisons if (!AreValuesEqual(sourceValue, targetValue)) @@ -198,9 +440,7 @@ public void MapPutObjectResponse_AllMappedProperties_WorkCorrectly() } } - // Test inherited properties - Assert.AreEqual(sourceResponse.HttpStatusCode, mappedResponse.HttpStatusCode, "HttpStatusCode should match"); - Assert.AreEqual(sourceResponse.ContentLength, mappedResponse.ContentLength, "ContentLength should match"); + additionalValidations?.Invoke(sourceRequest, mappedRequest); // Report any failures if (failedAssertions.Any()) @@ -209,33 +449,6 @@ public void MapPutObjectResponse_AllMappedProperties_WorkCorrectly() } } - [TestMethod] - [TestCategory("S3")] - public void MapPutObjectResponse_NullValues_HandledCorrectly() - { - // Test null handling scenarios - var testCases = new[] - { - // Test null Expiration - new PutObjectResponse { Expiration = null }, - - // Test null enum conversions - new PutObjectResponse { ChecksumType = null, RequestCharged = null, ServerSideEncryptionMethod = null } - }; - - foreach (var testCase in testCases) - { - var mapped = ResponseMapper.MapPutObjectResponse(testCase); - Assert.IsNotNull(mapped, "Response should always be mappable"); - - // Test null handling - if (testCase.Expiration == null) - { - Assert.IsNull(mapped.Expiration, "Null Expiration should map to null"); - } - } - } - [TestMethod] [TestCategory("S3")] public void ValidateTransferUtilityUploadResponseDefinitionCompleteness() @@ -253,35 +466,74 @@ public void ValidateCompleteMultipartUploadResponseConversionCompleteness() new[] { "Conversion", "CompleteMultipartResponse", "UploadResponse" }, "TransferUtilityUploadResponse"); } - - // Uncomment for DOTNET-8277 - - // [TestMethod] - // [TestCategory("S3")] - // public void 
ValidatePutObjectRequestDefinitionCompleteness() - // { - // ValidateResponseDefinitionCompleteness( - // new[] { "Definition", "UploadRequest", "PutObjectRequest" }, - // "PutObjectRequest"); - // } - - // [TestMethod] - // [TestCategory("S3")] - // public void ValidateGetObjectRequestDefinitionCompleteness() - // { - // ValidateResponseDefinitionCompleteness( - // new[] { "Definition", "DownloadRequest", "GetObjectRequest" }, - // "GetObjectRequest"); - // } - - // [TestMethod] - // [TestCategory("S3")] - // public void ValidateGetObjectRequestDefinitionCompleteness() - // { - // ValidateResponseDefinitionCompleteness( - // new[] { "Definition", "DownloadRequest", "GetObjectRequest" }, - // "TransferUtilityDownloadRequest"); - // } + + [TestMethod] + [TestCategory("S3")] + public void ValidatePutObjectRequestDefinitionCompleteness() + { + ValidateResponseDefinitionCompleteness( + new[] { "Definition", "UploadRequest", "PutObjectRequest" }, + "PutObjectRequest", + () => + { + return typeof(HeadersCollection) + .GetProperties(BindingFlags.Public | BindingFlags.Instance) + .Where(p => p.CanRead) + .Select(p => p.Name) + .ToList(); + }); + } + + [TestMethod] + [TestCategory("S3")] + public void ValidateGetObjectRequestDefinitionCompleteness() + { + ValidateResponseDefinitionCompleteness( + new[] { "Definition", "DownloadRequest", "GetObjectRequest" }, + "GetObjectRequest", + () => + { + return typeof(ResponseHeaderOverrides) + .GetProperties(BindingFlags.Public | BindingFlags.Instance) + .Where(p => p.CanRead) + .Select(p => p.Name) + .ToList(); + }); + } + + [TestMethod] + [TestCategory("S3")] + public void ValidateTransferUtilityDownloadRequestDefinitionCompleteness() + { + ValidateResponseDefinitionCompleteness( + new[] { "Definition", "DownloadRequest", "GetObjectRequest" }, + "TransferUtilityDownloadRequest", + () => + { + return typeof(ResponseHeaderOverrides) + .GetProperties(BindingFlags.Public | BindingFlags.Instance) + .Where(p => p.CanRead) + .Select(p => p.Name) + .ToList(); + }); + } + + [TestMethod] + [TestCategory("S3")] + public void ValidateTransferUtilityUploadRequestDefinitionCompleteness() + { + ValidateResponseDefinitionCompleteness( + new[] { "Definition", "UploadRequest", "PutObjectRequest" }, + "TransferUtilityUploadRequest", + () => + { + return typeof(HeadersCollection) + .GetProperties(BindingFlags.Public | BindingFlags.Instance) + .Where(p => p.CanRead) + .Select(p => p.Name) + .ToList(); + }); + } /// /// Generates appropriate test data for a given property type @@ -344,6 +596,36 @@ private static object GenerateTestValue(Type propertyType, string propertyName) return 1024; } + if (propertyType == typeof(List)) + { + if (_s3Grants is null) + { + _s3Grants = new List { new S3Grant { Grantee = new S3Grantee { DisplayName = "test-s3grantee"} } }; + } + + return _s3Grants; + } + + if (propertyType == typeof(MetadataCollection)) + { + var metadataCollection = new MetadataCollection(); + metadataCollection.Add("x-amz-meta-testkey", "testvalue"); + return metadataCollection; + } + + if (propertyType == typeof(DateTime)) + { + return DateTime.UtcNow; + } + + if (propertyType == typeof(List)) + { + return new List + { + new Tag { Key = "test-key", Value = "test-value" } + }; + } + // For unknown types, throw an exception instead of returning null // If we've reached this point it means there is an unhandled scenario/missing mapping in our test code that we need to handle. 
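// (Illustrative note, not from the patch: GenerateTestValue only understands the
// types enumerated above, so a mapping that introduces a property of a new type
// fails loudly here. A hypothetical DateTimeOffset property, for example, would
// need a branch like the following added above before this throw stops firing:
//   if (propertyType == typeof(DateTimeOffset))
//   {
//       return DateTimeOffset.UtcNow;
//   }
// DateTimeOffset is shown only to demonstrate the failure mode; no such mapping
// exists in this change.)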
throw new NotSupportedException( @@ -378,14 +660,17 @@ private static bool AreValuesEqual(object sourceValue, object targetValue) /// /// Resolves a property name to its actual class property name, checking aliases if needed /// - private static string ResolvePropertyName(string propertyName) + private static string ResolvePropertyName(string propertyName, string responseTypeName) { - // Check if there's an alias for this property name - if (_propertyAliases.TryGetValue(propertyName, out var aliasedName)) + if (_propertyAliases.TryGetValue(responseTypeName, out var objectAliases)) { - return aliasedName; + // Check if there's an alias for this property name + if (objectAliases.TryGetValue(propertyName, out var aliasedName)) + { + return aliasedName; + } } - + // Return the original name if no alias exists return propertyName; } @@ -434,7 +719,7 @@ private static void ValidateResponseDefinitionCompleteness( foreach (var definitionProperty in definitionProperties) { - var resolvedPropertyName = ResolvePropertyName(definitionProperty); + var resolvedPropertyName = ResolvePropertyName(definitionProperty, responseTypeName); // Check if the resolved property name exists in the actual class if (!actualProperties.Contains(resolvedPropertyName)) From 9cd5c6c648397cf1a83a8fda8ab382e3a46d2de7 Mon Sep 17 00:00:00 2001 From: Phil Asmar Date: Wed, 22 Oct 2025 16:25:13 -0400 Subject: [PATCH 04/56] Fix issue with HeadersCollection in ResponseMapperTests --- .../UnitTests/Custom/ResponseMapperTests.cs | 33 ++++++++++++++++--- 1 file changed, 29 insertions(+), 4 deletions(-) diff --git a/sdk/test/Services/S3/UnitTests/Custom/ResponseMapperTests.cs b/sdk/test/Services/S3/UnitTests/Custom/ResponseMapperTests.cs index b9a89d63ba70..8415e9ab9962 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/ResponseMapperTests.cs +++ b/sdk/test/Services/S3/UnitTests/Custom/ResponseMapperTests.cs @@ -315,11 +315,36 @@ private void ValidateMappingTransferUtilityAndSdkRequests Date: Thu, 23 Oct 2025 12:03:52 -0400 Subject: [PATCH 05/56] Add Progress listeners for initiated, complete, and failed for simple upload (#4059) --- .../433a9a6d-b8ea-4676-b763-70711e8288e2.json | 11 + .../Transfer/Internal/SimpleUploadCommand.cs | 49 ++- .../_async/SimpleUploadCommand.async.cs | 13 +- .../Transfer/TransferUtilityUploadRequest.cs | 286 +++++++++++++++++- .../IntegrationTests/TransferUtilityTests.cs | 233 +++++++++++++- .../UnitTests/Custom/ResponseMapperTests.cs | 6 +- 6 files changed, 590 insertions(+), 8 deletions(-) create mode 100644 generator/.DevConfigs/433a9a6d-b8ea-4676-b763-70711e8288e2.json diff --git a/generator/.DevConfigs/433a9a6d-b8ea-4676-b763-70711e8288e2.json b/generator/.DevConfigs/433a9a6d-b8ea-4676-b763-70711e8288e2.json new file mode 100644 index 000000000000..e99cbe1c4bc1 --- /dev/null +++ b/generator/.DevConfigs/433a9a6d-b8ea-4676-b763-70711e8288e2.json @@ -0,0 +1,11 @@ +{ + "services": [ + { + "serviceName": "S3", + "type": "minor", + "changeLogMessages": [ + "Added UploadInitiatedEvent, UploadCompletedEvent, and UploadFailedEvent for non multipart uploads." 
+ ] + } + ] +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/SimpleUploadCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/SimpleUploadCommand.cs index 3f10fa35b1d0..d8de23a6145b 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/SimpleUploadCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/SimpleUploadCommand.cs @@ -41,12 +41,18 @@ internal partial class SimpleUploadCommand : BaseCommand IAmazonS3 _s3Client; TransferUtilityConfig _config; TransferUtilityUploadRequest _fileTransporterRequest; + long _totalTransferredBytes; + private readonly long _contentLength; internal SimpleUploadCommand(IAmazonS3 s3Client, TransferUtilityConfig config, TransferUtilityUploadRequest fileTransporterRequest) { this._s3Client = s3Client; this._config = config; this._fileTransporterRequest = fileTransporterRequest; + + // Cache content length immediately while stream is accessible to avoid ObjectDisposedException in failure scenarios + this._contentLength = this._fileTransporterRequest.ContentLength; + var fileName = fileTransporterRequest.FilePath; } @@ -108,9 +114,48 @@ internal PutObjectRequest ConstructRequest() private void PutObjectProgressEventCallback(object sender, UploadProgressArgs e) { - var progressArgs = new UploadProgressArgs(e.IncrementTransferred, e.TransferredBytes, e.TotalBytes, - e.CompensationForRetry, _fileTransporterRequest.FilePath); + // Keep track of the total transferred bytes so that we can also return this value in case of failure + long transferredBytes = Interlocked.Add(ref _totalTransferredBytes, e.IncrementTransferred - e.CompensationForRetry); + + var progressArgs = new UploadProgressArgs(e.IncrementTransferred, transferredBytes, _contentLength, + e.CompensationForRetry, _fileTransporterRequest.FilePath, _fileTransporterRequest); this._fileTransporterRequest.OnRaiseProgressEvent(progressArgs); } + + private void FireTransferInitiatedEvent() + { + var initiatedArgs = new UploadInitiatedEventArgs( + request: _fileTransporterRequest, + filePath: _fileTransporterRequest.FilePath, + totalBytes: _contentLength + ); + + _fileTransporterRequest.OnRaiseTransferInitiatedEvent(initiatedArgs); + } + + private void FireTransferCompletedEvent(TransferUtilityUploadResponse response) + { + var completedArgs = new UploadCompletedEventArgs( + request: _fileTransporterRequest, + response: response, + filePath: _fileTransporterRequest.FilePath, + transferredBytes: Interlocked.Read(ref _totalTransferredBytes), + totalBytes: _contentLength + ); + + _fileTransporterRequest.OnRaiseTransferCompletedEvent(completedArgs); + } + + private void FireTransferFailedEvent() + { + var failedArgs = new UploadFailedEventArgs( + request: _fileTransporterRequest, + filePath: _fileTransporterRequest.FilePath, + transferredBytes: Interlocked.Read(ref _totalTransferredBytes), + totalBytes: _contentLength + ); + + _fileTransporterRequest.OnRaiseTransferFailedEvent(failedArgs); + } } } diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/SimpleUploadCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/SimpleUploadCommand.async.cs index e4c94d65044f..51680eaaba09 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/SimpleUploadCommand.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/SimpleUploadCommand.async.cs @@ -38,9 +38,20 @@ await this.AsyncThrottler.WaitAsync(cancellationToken) .ConfigureAwait(continueOnCapturedContext: false); } + FireTransferInitiatedEvent(); + var putRequest = ConstructRequest(); - await 
_s3Client.PutObjectAsync(putRequest, cancellationToken) + var response = await _s3Client.PutObjectAsync(putRequest, cancellationToken) .ConfigureAwait(continueOnCapturedContext: false); + + var mappedResponse = ResponseMapper.MapPutObjectResponse(response); + + FireTransferCompletedEvent(mappedResponse); + } + catch (Exception) + { + FireTransferFailedEvent(); + throw; } finally { diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadRequest.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadRequest.cs index b21ab2ae7602..7e54dc52d5d5 100644 --- a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadRequest.cs +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadRequest.cs @@ -25,6 +25,7 @@ using System.IO; using System.Text; +using Amazon.Runtime; using Amazon.Runtime.Internal; using Amazon.S3.Model; using Amazon.Util; @@ -170,6 +171,132 @@ internal bool IsSetPartSize() /// public event EventHandler UploadProgressEvent; + /// + /// The event for UploadInitiatedEvent notifications. All + /// subscribers will be notified when a transfer operation + /// starts. + /// + /// The UploadInitiatedEvent is fired exactly once when + /// a transfer operation begins. The delegates attached to the event + /// will be passed information about the upload request and + /// total file size, but no progress information. + /// + /// + /// + /// Subscribe to this event if you want to receive + /// UploadInitiatedEvent notifications. Here is how:
+ /// 1. Define a method with a signature similar to this one: + /// + /// private void uploadStarted(object sender, UploadInitiatedEventArgs args) + /// { + /// Console.WriteLine($"Upload started: {args.FilePath}"); + /// Console.WriteLine($"Total size: {args.TotalBytes} bytes"); + /// Console.WriteLine($"Bucket: {args.Request.BucketName}"); + /// Console.WriteLine($"Key: {args.Request.Key}"); + /// } + /// + /// 2. Add this method to the UploadInitiatedEvent delegate's invocation list + /// + /// TransferUtilityUploadRequest request = new TransferUtilityUploadRequest(); + /// request.UploadInitiatedEvent += uploadStarted; + /// + ///
+ public event EventHandler UploadInitiatedEvent; + + /// + /// The event for UploadCompletedEvent notifications. All + /// subscribers will be notified when a transfer operation + /// completes successfully. + /// + /// The UploadCompletedEvent is fired exactly once when + /// a transfer operation completes successfully. The delegates attached to the event + /// will be passed information about the completed upload including + /// the final response from S3 with ETag, VersionId, and other metadata. + /// + /// + /// + /// Subscribe to this event if you want to receive + /// UploadCompletedEvent notifications. Here is how:
+ /// 1. Define a method with a signature similar to this one: + /// + /// private void uploadCompleted(object sender, UploadCompletedEventArgs args) + /// { + /// Console.WriteLine($"Upload completed: {args.FilePath}"); + /// Console.WriteLine($"Transferred: {args.TransferredBytes} bytes"); + /// Console.WriteLine($"ETag: {args.Response.ETag}"); + /// Console.WriteLine($"S3 Key: {args.Response.Key}"); + /// Console.WriteLine($"Version ID: {args.Response.VersionId}"); + /// } + /// + /// 2. Add this method to the UploadCompletedEvent delegate's invocation list + /// + /// TransferUtilityUploadRequest request = new TransferUtilityUploadRequest(); + /// request.UploadCompletedEvent += uploadCompleted; + /// + ///
+ public event EventHandler UploadCompletedEvent; + + /// + /// The event for UploadFailedEvent notifications. All + /// subscribers will be notified when a transfer operation + /// fails. + /// + /// The UploadFailedEvent is fired exactly once when + /// a transfer operation fails. The delegates attached to the event + /// will be passed information about the failed upload including + /// partial progress information, but no response data since the upload failed. + /// + /// + /// + /// Subscribe to this event if you want to receive + /// UploadFailedEvent notifications. Here is how:
+ /// 1. Define a method with a signature similar to this one: + /// + /// private void uploadFailed(object sender, UploadFailedEventArgs args) + /// { + /// Console.WriteLine($"Upload failed: {args.FilePath}"); + /// Console.WriteLine($"Partial progress: {args.TransferredBytes} / {args.TotalBytes} bytes"); + /// var percent = (double)args.TransferredBytes / args.TotalBytes * 100; + /// Console.WriteLine($"Completion: {percent:F1}%"); + /// Console.WriteLine($"Bucket: {args.Request.BucketName}"); + /// Console.WriteLine($"Key: {args.Request.Key}"); + /// } + /// + /// 2. Add this method to the UploadFailedEvent delegate's invocation list + /// + /// TransferUtilityUploadRequest request = new TransferUtilityUploadRequest(); + /// request.UploadFailedEvent += uploadFailed; + /// + ///
+ public event EventHandler UploadFailedEvent; + + /// + /// Causes the UploadInitiatedEvent event to be fired. + /// + /// UploadInitiatedEventArgs args + internal void OnRaiseTransferInitiatedEvent(UploadInitiatedEventArgs args) + { + AWSSDKUtils.InvokeInBackground(UploadInitiatedEvent, args, this); + } + + /// + /// Causes the UploadCompletedEvent event to be fired. + /// + /// UploadCompletedEventArgs args + internal void OnRaiseTransferCompletedEvent(UploadCompletedEventArgs args) + { + AWSSDKUtils.InvokeInBackground(UploadCompletedEvent, args, this); + } + + /// + /// Causes the UploadFailedEvent event to be fired. + /// + /// UploadFailedEventArgs args + internal void OnRaiseTransferFailedEvent(UploadFailedEventArgs args) + { + AWSSDKUtils.InvokeInBackground(UploadFailedEvent, args, this); + } + /// /// Causes the UploadProgressEvent event to be fired. @@ -460,7 +587,7 @@ public class UploadProgressArgs : TransferProgressArgs /// currently transferred bytes and the /// total number of bytes to be transferred /// - /// The how many bytes were transferred since last event. + /// How many bytes were transferred since last event. /// The number of bytes transferred /// The total number of bytes to be transferred public UploadProgressArgs(long incrementTransferred, long transferred, long total) @@ -473,7 +600,7 @@ public UploadProgressArgs(long incrementTransferred, long transferred, long tota /// currently transferred bytes and the /// total number of bytes to be transferred ///
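// (Consumer sketch, not part of the patch: wiring the three lifecycle events
// declared above together with the existing progress event. The client variable,
// bucket, key, and file path are assumed placeholders, and this is presumed to
// run inside an async method.)
var request = new TransferUtilityUploadRequest
{
    BucketName = "amzn-example-bucket",
    Key = "example/object.bin",
    FilePath = @"C:\temp\object.bin"
};
request.UploadInitiatedEvent += (s, e) => Console.WriteLine($"Started: {e.TotalBytes} bytes");
request.UploadProgressEvent += (s, e) => Console.WriteLine($"{e.TransferredBytes}/{e.TotalBytes}");
request.UploadCompletedEvent += (s, e) => Console.WriteLine($"Completed, ETag {e.Response.ETag}");
request.UploadFailedEvent += (s, e) => Console.WriteLine($"Failed after {e.TransferredBytes} bytes");
using (var transferUtility = new TransferUtility(s3Client))
{
    await transferUtility.UploadAsync(request);
}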
- /// The how many bytes were transferred since last event. + /// How many bytes were transferred since last event. /// The number of bytes transferred /// The total number of bytes to be transferred /// The file being uploaded @@ -487,7 +614,7 @@ public UploadProgressArgs(long incrementTransferred, long transferred, long tota /// currently transferred bytes and the /// total number of bytes to be transferred ///
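// (Sketch, not part of the patch: how the CompensationForRetry value documented
// below is consumed. This mirrors the PutObjectProgressEventCallback change
// earlier in this series; _totalTransferredBytes is the aggregator's own running
// total, so a retried request's already-reported bytes are subtracted back out.)
long transferredBytes = Interlocked.Add(ref _totalTransferredBytes,
    e.IncrementTransferred - e.CompensationForRetry);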
- /// The how many bytes were transferred since last event. + /// How many bytes were transferred since last event. /// The number of bytes transferred /// The total number of bytes to be transferred /// A compensation for any upstream aggregators if this event to correct theit totalTransferred count, @@ -500,11 +627,164 @@ internal UploadProgressArgs(long incrementTransferred, long transferred, long to this.CompensationForRetry = compensationForRetry; } + /// + /// Constructor for upload progress with request + /// + /// How many bytes were transferred since last event. + /// The number of bytes transferred + /// The total number of bytes to be transferred + /// A compensation for any upstream aggregators so they can correct their totalTransferred count, + /// in case the underlying request is retried. + /// The file being uploaded + /// The original TransferUtilityUploadRequest created by the user + internal UploadProgressArgs(long incrementTransferred, long transferred, long total, long compensationForRetry, string filePath, TransferUtilityUploadRequest request) + : base(incrementTransferred, transferred, total) + { + this.FilePath = filePath; + this.CompensationForRetry = compensationForRetry; + this.Request = request; + } + /// /// Gets the FilePath. /// public string FilePath { get; private set; } internal long CompensationForRetry { get; set; } + + /// + /// The original TransferUtilityUploadRequest created by the user. + /// + public TransferUtilityUploadRequest Request { get; internal set; } + } + + /// + /// Encapsulates the information needed when a transfer operation is initiated. + /// Provides access to the original request and total file size without any progress information. + /// + public class UploadInitiatedEventArgs : EventArgs + { + /// + /// Initializes a new instance of the UploadInitiatedEventArgs class. + /// + /// The original TransferUtilityUploadRequest created by the user + /// The file being uploaded + /// The total number of bytes to be transferred + internal UploadInitiatedEventArgs(TransferUtilityUploadRequest request, string filePath, long totalBytes) + { + Request = request; + FilePath = filePath; + TotalBytes = totalBytes; + } + + /// + /// The original TransferUtilityUploadRequest created by the user. + /// Contains all the upload parameters and configuration. + /// + public TransferUtilityUploadRequest Request { get; private set; } + + /// + /// Gets the file being uploaded. + /// + public string FilePath { get; private set; } + + /// + /// Gets the total number of bytes to be transferred. + /// + public long TotalBytes { get; private set; } + } + + /// + /// Encapsulates the information needed when a transfer operation completes successfully. + /// Provides access to the original request, final response, and completion details. + /// + public class UploadCompletedEventArgs : EventArgs + { + /// + /// Initializes a new instance of the UploadCompletedEventArgs class. + /// + /// The original TransferUtilityUploadRequest created by the user + /// The unified response from Transfer Utility + /// The file being uploaded + /// The number of bytes that were transferred + /// The total number of bytes to be transferred (should equal transferredBytes for successful uploads). 
+ internal UploadCompletedEventArgs(TransferUtilityUploadRequest request, TransferUtilityUploadResponse response, string filePath, long transferredBytes, long totalBytes) + { + Request = request; + Response = response; + FilePath = filePath; + TransferredBytes = transferredBytes; + TotalBytes = totalBytes; + } + + /// + /// The original TransferUtilityUploadRequest created by the user. + /// Contains all the upload parameters and configuration. + /// + public TransferUtilityUploadRequest Request { get; private set; } + + /// + /// The unified response from Transfer Utility after successful upload completion. + /// Contains mapped fields from either PutObjectResponse (simple uploads) or CompleteMultipartUploadResponse (multipart uploads). + /// + public TransferUtilityUploadResponse Response { get; private set; } + + /// + /// Gets the file being uploaded. + /// + public string FilePath { get; private set; } + + /// + /// Gets the total number of bytes that were successfully transferred. + /// + public long TransferredBytes { get; private set; } + + /// + /// Gets the total number of bytes that were transferred (should equal TransferredBytes for successful uploads). + /// + public long TotalBytes { get; private set; } + } + + /// + /// Encapsulates the information needed when a transfer operation fails. + /// Provides access to the original request and partial progress information. + /// + public class UploadFailedEventArgs : EventArgs + { + /// + /// Initializes a new instance of the UploadFailedEventArgs class. + /// + /// The original TransferUtilityUploadRequest created by the user + /// The file being uploaded + /// The number of bytes transferred before failure + /// The total number of bytes that should have been transferred + internal UploadFailedEventArgs(TransferUtilityUploadRequest request, string filePath, long transferredBytes, long totalBytes) + { + Request = request; + FilePath = filePath; + TransferredBytes = transferredBytes; + TotalBytes = totalBytes; + } + + /// + /// The original TransferUtilityUploadRequest created by the user. + /// Contains all the upload parameters and configuration. + /// + public TransferUtilityUploadRequest Request { get; private set; } + + /// + /// Gets the file being uploaded. + /// + public string FilePath { get; private set; } + + /// + /// Gets the number of bytes that were transferred before the failure occurred. + /// + public long TransferredBytes { get; private set; } + + /// + /// Gets the total number of bytes that should have been transferred. 
+ /// + public long TotalBytes { get; private set; } } } diff --git a/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs b/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs index cce278d328ae..427b863e95ed 100644 --- a/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs +++ b/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs @@ -105,6 +105,113 @@ public void SimpleUploadProgressTest() progressValidator.AssertOnCompletion(); } + [TestMethod] + [TestCategory("S3")] + public void SimpleUploadInitiatedEventTest() + { + var fileName = UtilityMethods.GenerateName(@"SimpleUploadTest\InitiatedEvent"); + var eventValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.IsTrue(args.TotalBytes > 0); + Assert.AreEqual(10 * MEG_SIZE, args.TotalBytes); + Assert.AreEqual(args.FilePath, Path.Combine(BasePath, fileName)); + } + }; + UploadWithLifecycleEvents(fileName, 10 * MEG_SIZE, eventValidator, null, null); + eventValidator.AssertEventFired(); + } + + [TestMethod] + [TestCategory("S3")] + public void SimpleUploadCompletedEventTest() + { + var fileName = UtilityMethods.GenerateName(@"SimpleUploadTest\CompletedEvent"); + var eventValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.IsNotNull(args.Response); + Assert.AreEqual(args.TransferredBytes, args.TotalBytes); + Assert.AreEqual(10 * MEG_SIZE, args.TotalBytes); + Assert.IsTrue(!string.IsNullOrEmpty(args.Response.ETag)); + Assert.AreEqual(args.FilePath, Path.Combine(BasePath, fileName)); + } + }; + UploadWithLifecycleEvents(fileName, 10 * MEG_SIZE, null, eventValidator, null); + eventValidator.AssertEventFired(); + } + + [TestMethod] + [TestCategory("S3")] + public void SimpleUploadFailedEventTest() + { + var fileName = UtilityMethods.GenerateName(@"SimpleUploadTest\FailedEvent"); + var eventValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.IsTrue(args.TotalBytes > 0); + Assert.AreEqual(5 * MEG_SIZE, args.TotalBytes); + Assert.AreEqual(args.FilePath, Path.Combine(BasePath, fileName)); + // For failed uploads, transferred bytes should be less than or equal to total bytes + Assert.IsTrue(args.TransferredBytes <= args.TotalBytes); + } + }; + + // Use invalid bucket name to force failure + var invalidBucketName = "invalid-bucket-name-" + Guid.NewGuid().ToString(); + + try + { + UploadWithLifecycleEventsAndBucket(fileName, 5 * MEG_SIZE, invalidBucketName, null, null, eventValidator); + Assert.Fail("Expected an exception to be thrown for invalid bucket"); + } + catch (AmazonS3Exception) + { + // Expected exception - the failed event should have been fired + eventValidator.AssertEventFired(); + } + } + + [TestMethod] + [TestCategory("S3")] + public void SimpleUploadCompleteLifecycleTest() + { + var fileName = UtilityMethods.GenerateName(@"SimpleUploadTest\CompleteLifecycle"); + + var initiatedValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.AreEqual(8 * MEG_SIZE, args.TotalBytes); + Assert.AreEqual(args.FilePath, Path.Combine(BasePath, fileName)); + } + }; + + var completedValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.IsNotNull(args.Response); + Assert.AreEqual(args.TransferredBytes, args.TotalBytes); + Assert.AreEqual(8 * MEG_SIZE, args.TotalBytes); + 
Assert.AreEqual(args.FilePath, Path.Combine(BasePath, fileName)); + } + }; + + UploadWithLifecycleEvents(fileName, 8 * MEG_SIZE, initiatedValidator, completedValidator, null); + + initiatedValidator.AssertEventFired(); + completedValidator.AssertEventFired(); + } + [TestMethod] [TestCategory("S3")] public void SimpleUpload() @@ -375,6 +482,49 @@ public void UploadUnseekableStreamFileSizeBetweenMinPartSizeAndPartBufferSize() } } + [TestMethod] + [TestCategory("S3")] + public void SimpleUploadProgressTotalBytesTest() + { + var fileName = UtilityMethods.GenerateName(@"SimpleUploadProgressTotalBytes\TestFile"); + var filePath = Path.Combine(BasePath, fileName); + var fileSize = 10 * MEG_SIZE; + + // Create test file + UtilityMethods.GenerateFile(filePath, fileSize); + + var transferConfig = new TransferUtilityConfig() + { + MinSizeBeforePartUpload = 20 * MEG_SIZE, + }; + + var progressValidator = new TransferProgressValidator + { + Validate = (progress) => + { + Assert.IsTrue(progress.TotalBytes > 0, "TotalBytes should be greater than 0"); + Assert.AreEqual(fileSize, progress.TotalBytes, "TotalBytes should equal file size"); + Assert.AreEqual(filePath, progress.FilePath, "FilePath should match expected path"); + } + }; + + using (var fileTransferUtility = new TransferUtility(Client, transferConfig)) + { + var request = new TransferUtilityUploadRequest() + { + BucketName = bucketName, + FilePath = filePath, + Key = fileName + }; + + request.UploadProgressEvent += progressValidator.OnProgressEvent; + + fileTransferUtility.Upload(request); + + progressValidator.AssertOnCompletion(); + } + } + [TestMethod] [TestCategory("S3")] public void UploadUnSeekableStreamWithZeroLengthTest() @@ -1300,7 +1450,7 @@ public void AssertOnCompletion() if (this.ProgressEventException != null) throw this.ProgressEventException; - // Add some time for the background thread to finish before checking the complete + // Since AWSSDKUtils.InvokeInBackground fires the event in the background it is possible that we check too early that the event has fired. In this case, we sleep and check again. for (int retries = 1; retries < 5 && !this.IsProgressEventComplete; retries++) { Thread.Sleep(1000 * retries); @@ -1393,6 +1543,87 @@ public void OnProgressEvent(object sender, T progress) } } } + + class TransferLifecycleEventValidator + { + public Action Validate { get; set; } + public bool EventFired { get; private set; } + public Exception EventException { get; private set; } + + public void OnEventFired(object sender, T eventArgs) + { + try + { + EventFired = true; + Console.WriteLine("Lifecycle Event Fired: {0}", typeof(T).Name); + Validate?.Invoke(eventArgs); + } + catch (Exception ex) + { + EventException = ex; + Console.WriteLine("Exception caught in lifecycle event: {0}", ex.Message); + throw; + } + } + + public void AssertEventFired() + { + if (EventException != null) + throw EventException; + + // Since AWSSDKUtils.InvokeInBackground fires the event in the background it is possible that we check too early that the event has fired. In this case, we sleep and check again. 
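// (Illustrative alternative, not part of the patch: instead of the sleep-and-recheck
// loop below, a validator could block on a synchronization primitive from
// System.Threading. The field name is assumed.)
//
//   private readonly ManualResetEventSlim _fired = new ManualResetEventSlim();
//   // set from OnEventFired: _fired.Set();
//   // then: Assert.IsTrue(_fired.Wait(TimeSpan.FromSeconds(15)), "event was not fired");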
+ for (int retries = 1; retries < 5 && !EventFired; retries++) + { + Thread.Sleep(1000 * retries); + } + Assert.IsTrue(EventFired, $"{typeof(T).Name} event was not fired"); + } + } + + void UploadWithLifecycleEvents(string fileName, long size, + TransferLifecycleEventValidator initiatedValidator, + TransferLifecycleEventValidator completedValidator, + TransferLifecycleEventValidator failedValidator) + { + UploadWithLifecycleEventsAndBucket(fileName, size, bucketName, initiatedValidator, completedValidator, failedValidator); + } + + void UploadWithLifecycleEventsAndBucket(string fileName, long size, string targetBucketName, + TransferLifecycleEventValidator initiatedValidator, + TransferLifecycleEventValidator completedValidator, + TransferLifecycleEventValidator failedValidator) + { + var key = fileName; + var path = Path.Combine(BasePath, fileName); + UtilityMethods.GenerateFile(path, size); + + var config = new TransferUtilityConfig(); + var transferUtility = new TransferUtility(Client, config); + var request = new TransferUtilityUploadRequest + { + BucketName = targetBucketName, + FilePath = path, + Key = key, + ContentType = octetStreamContentType + }; + + if (initiatedValidator != null) + { + request.UploadInitiatedEvent += initiatedValidator.OnEventFired; + } + + if (completedValidator != null) + { + request.UploadCompletedEvent += completedValidator.OnEventFired; + } + + if (failedValidator != null) + { + request.UploadFailedEvent += failedValidator.OnEventFired; + } + + transferUtility.Upload(request); + } private class UnseekableStream : MemoryStream { private readonly bool _setZeroLengthStream; diff --git a/sdk/test/Services/S3/UnitTests/Custom/ResponseMapperTests.cs b/sdk/test/Services/S3/UnitTests/Custom/ResponseMapperTests.cs index 8415e9ab9962..05fbd6807f15 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/ResponseMapperTests.cs +++ b/sdk/test/Services/S3/UnitTests/Custom/ResponseMapperTests.cs @@ -165,7 +165,11 @@ public void MapUploadRequest_PutObjectRequest_AllMappedProperties_WorkCorrectly( var simpleUploadCommand = new SimpleUploadCommand(null, null, sourceRequest); return simpleUploadCommand.ConstructRequest(); }, - usesHeadersCollection: false); + usesHeadersCollection: false, + (sourceRequest) => + { + sourceRequest.InputStream = new MemoryStream(1024); + }); } [TestMethod] From e4d97339fd275ca9dfce9d5afe4011cd4de392dc Mon Sep 17 00:00:00 2001 From: Garrett Beatty Date: Thu, 23 Oct 2025 12:06:53 -0400 Subject: [PATCH 06/56] Add mapping of CompletemultipartUploadResponse to TransferUtilityUploadResponse (#4060) --- .../433a9a6d-b8ea-4676-b763-70711e8288e6.json | 11 ++++ .../Transfer/Internal/ResponseMapper.cs | 61 +++++++++++++++++++ .../EmbeddedResource/property-aliases.json | 4 ++ .../UnitTests/Custom/ResponseMapperTests.cs | 50 +++++++++++++++ 4 files changed, 126 insertions(+) create mode 100644 generator/.DevConfigs/433a9a6d-b8ea-4676-b763-70711e8288e6.json diff --git a/generator/.DevConfigs/433a9a6d-b8ea-4676-b763-70711e8288e6.json b/generator/.DevConfigs/433a9a6d-b8ea-4676-b763-70711e8288e6.json new file mode 100644 index 000000000000..5d67e3a8b858 --- /dev/null +++ b/generator/.DevConfigs/433a9a6d-b8ea-4676-b763-70711e8288e6.json @@ -0,0 +1,11 @@ +{ + "services": [ + { + "serviceName": "S3", + "type": "patch", + "changeLogMessages": [ + "Added CompleteMultipartUploadResponse to TransferUtilityUploadResponse mapping" + ] + } + ] +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/ResponseMapper.cs 
b/sdk/src/Services/S3/Custom/Transfer/Internal/ResponseMapper.cs index d130aee20bff..7e8505ecbf69 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/ResponseMapper.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/ResponseMapper.cs @@ -99,6 +99,67 @@ internal static TransferUtilityUploadResponse MapPutObjectResponse(PutObjectResp return response; } + + /// + /// Maps a CompleteMultipartUploadResponse to TransferUtilityUploadResponse. + /// Uses the field mappings defined in mapping.json "Conversion" -> "CompleteMultipartResponse" -> "UploadResponse". + /// + /// The CompleteMultipartUploadResponse to map from + /// A new TransferUtilityUploadResponse with mapped fields + internal static TransferUtilityUploadResponse MapCompleteMultipartUploadResponse(CompleteMultipartUploadResponse source) + { + if (source == null) + return null; + + var response = new TransferUtilityUploadResponse(); + + // Map all fields as defined in mapping.json "Conversion" -> "CompleteMultipartResponse" -> "UploadResponse" + if (source.IsSetBucketKeyEnabled()) + response.BucketKeyEnabled = source.BucketKeyEnabled.GetValueOrDefault(); + + if (source.IsSetChecksumCRC32()) + response.ChecksumCRC32 = source.ChecksumCRC32; + + if (source.IsSetChecksumCRC32C()) + response.ChecksumCRC32C = source.ChecksumCRC32C; + + if (source.IsSetChecksumCRC64NVME()) + response.ChecksumCRC64NVME = source.ChecksumCRC64NVME; + + if (source.IsSetChecksumSHA1()) + response.ChecksumSHA1 = source.ChecksumSHA1; + + if (source.IsSetChecksumSHA256()) + response.ChecksumSHA256 = source.ChecksumSHA256; + + if (source.ChecksumType != null) + response.ChecksumType = source.ChecksumType; + + if (source.IsSetETag()) + response.ETag = source.ETag; + + if (source.Expiration != null) + response.Expiration = source.Expiration; + + if (source.IsSetRequestCharged()) + response.RequestCharged = source.RequestCharged; + + if (source.ServerSideEncryptionMethod != null) + response.ServerSideEncryptionMethod = source.ServerSideEncryptionMethod; + + if (source.IsSetServerSideEncryptionKeyManagementServiceKeyId()) + response.ServerSideEncryptionKeyManagementServiceKeyId = source.ServerSideEncryptionKeyManagementServiceKeyId; + + if (source.IsSetVersionId()) + response.VersionId = source.VersionId; + + // Copy response metadata + response.ResponseMetadata = source.ResponseMetadata; + response.ContentLength = source.ContentLength; + response.HttpStatusCode = source.HttpStatusCode; + + return response; + } } } diff --git a/sdk/test/Services/S3/UnitTests/Custom/EmbeddedResource/property-aliases.json b/sdk/test/Services/S3/UnitTests/Custom/EmbeddedResource/property-aliases.json index e9de0a44b4fe..97a29b7695c3 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/EmbeddedResource/property-aliases.json +++ b/sdk/test/Services/S3/UnitTests/Custom/EmbeddedResource/property-aliases.json @@ -112,6 +112,10 @@ }, "AbortMultipartUploadRequest": { "Bucket": "BucketName" + }, + "CompleteMultipartUploadResponse": { + "ServerSideEncryption": "ServerSideEncryptionMethod", + "SSEKMSKeyId": "ServerSideEncryptionKeyManagementServiceKeyId" } } } \ No newline at end of file diff --git a/sdk/test/Services/S3/UnitTests/Custom/ResponseMapperTests.cs b/sdk/test/Services/S3/UnitTests/Custom/ResponseMapperTests.cs index 05fbd6807f15..18b47e422d4d 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/ResponseMapperTests.cs +++ b/sdk/test/Services/S3/UnitTests/Custom/ResponseMapperTests.cs @@ -487,6 +487,56 @@ public void ValidateTransferUtilityUploadResponseDefinitionCompleteness() 
"TransferUtilityUploadResponse"); } + [TestMethod] + [TestCategory("S3")] + public void MapCompleteMultipartUploadResponse_AllMappedProperties_WorkCorrectly() + { + ValidateMappingTransferUtilityAndSdkRequests( + new[] { "Conversion", "CompleteMultipartResponse", "UploadResponse" }, + (sourceResponse) => + { + return ResponseMapper.MapCompleteMultipartUploadResponse(sourceResponse); + }, + usesHeadersCollection: false, + (sourceResponse) => + { + sourceResponse.HttpStatusCode = HttpStatusCode.OK; + sourceResponse.ContentLength = 2048; + }, + (sourceResponse, targetResponse) => + { + Assert.AreEqual(sourceResponse.HttpStatusCode, targetResponse.HttpStatusCode, "HttpStatusCode should match"); + Assert.AreEqual(sourceResponse.ContentLength, targetResponse.ContentLength, "ContentLength should match"); + }); + } + + [TestMethod] + [TestCategory("S3")] + public void MapCompleteMultipartUploadResponse_NullValues_HandledCorrectly() + { + // Test null handling scenarios + var testCases = new[] + { + // Test null Expiration + new CompleteMultipartUploadResponse { Expiration = null }, + + // Test null enum conversions + new CompleteMultipartUploadResponse { ChecksumType = null, RequestCharged = null, ServerSideEncryptionMethod = null } + }; + + foreach (var testCase in testCases) + { + var mapped = ResponseMapper.MapCompleteMultipartUploadResponse(testCase); + Assert.IsNotNull(mapped, "Response should always be mappable"); + + // Test null handling + if (testCase.Expiration == null) + { + Assert.IsNull(mapped.Expiration, "Null Expiration should map to null"); + } + } + } + [TestMethod] [TestCategory("S3")] public void ValidateCompleteMultipartUploadResponseConversionCompleteness() From 923d83310cf6902827efad0cbb85177125368627 Mon Sep 17 00:00:00 2001 From: Garrett Beatty Date: Thu, 23 Oct 2025 12:08:13 -0400 Subject: [PATCH 07/56] Add multipartupload lifecycle tracking (#4061) --- .../433a9a6d-b8ea-4676-b763-70711e8288e3.json | 11 + .../Internal/MultipartUploadCommand.cs | 38 ++- .../_async/MultipartUploadCommand.async.cs | 51 +++- .../IntegrationTests/TransferUtilityTests.cs | 253 ++++++++++++++++++ 4 files changed, 343 insertions(+), 10 deletions(-) create mode 100644 generator/.DevConfigs/433a9a6d-b8ea-4676-b763-70711e8288e3.json diff --git a/generator/.DevConfigs/433a9a6d-b8ea-4676-b763-70711e8288e3.json b/generator/.DevConfigs/433a9a6d-b8ea-4676-b763-70711e8288e3.json new file mode 100644 index 000000000000..1790a068cfae --- /dev/null +++ b/generator/.DevConfigs/433a9a6d-b8ea-4676-b763-70711e8288e3.json @@ -0,0 +1,11 @@ +{ + "services": [ + { + "serviceName": "S3", + "type": "minor", + "changeLogMessages": [ + "Added UploadInitiatedEvent, UploadCompletedEvent, and UploadFailedEvent for multipart uploads." 
+ ] + } + ] +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartUploadCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartUploadCommand.cs index 644500df5bff..61ba2db64940 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartUploadCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartUploadCommand.cs @@ -375,10 +375,46 @@ private void UploadPartProgressEventCallback(object sender, UploadProgressArgs e long transferredBytes = Interlocked.Add(ref _totalTransferredBytes, e.IncrementTransferred - e.CompensationForRetry); var progressArgs = new UploadProgressArgs(e.IncrementTransferred, transferredBytes, this._contentLength, - e.CompensationForRetry, this._fileTransporterRequest.FilePath); + e.CompensationForRetry, this._fileTransporterRequest.FilePath, this._fileTransporterRequest); this._fileTransporterRequest.OnRaiseProgressEvent(progressArgs); } + private void FireTransferInitiatedEvent() + { + var initiatedArgs = new UploadInitiatedEventArgs( + request: _fileTransporterRequest, + totalBytes: _contentLength, + filePath: _fileTransporterRequest.FilePath + ); + + _fileTransporterRequest.OnRaiseTransferInitiatedEvent(initiatedArgs); + } + + private void FireTransferCompletedEvent(TransferUtilityUploadResponse response) + { + var completedArgs = new UploadCompletedEventArgs( + request: _fileTransporterRequest, + filePath: _fileTransporterRequest.FilePath, + response: response, + transferredBytes: Interlocked.Read(ref _totalTransferredBytes), + totalBytes: _contentLength + ); + + _fileTransporterRequest.OnRaiseTransferCompletedEvent(completedArgs); + } + + private void FireTransferFailedEvent() + { + var failedArgs = new UploadFailedEventArgs( + request: _fileTransporterRequest, + filePath: _fileTransporterRequest.FilePath, + transferredBytes: Interlocked.Read(ref _totalTransferredBytes), + totalBytes: _contentLength + ); + + _fileTransporterRequest.OnRaiseTransferFailedEvent(failedArgs); + } + /// /// /// If a checksum algorithm was not specified, we MUST add the default value used by the SDK (as the individual part diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartUploadCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartUploadCommand.async.cs index 74e5f6c874a8..4966c9e012e7 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartUploadCommand.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartUploadCommand.async.cs @@ -33,21 +33,33 @@ internal partial class MultipartUploadCommand : BaseCommand public override async Task ExecuteAsync(CancellationToken cancellationToken) { + // Fire transfer initiated event FIRST, before choosing path + FireTransferInitiatedEvent(); + if ( (this._fileTransporterRequest.InputStream != null && !this._fileTransporterRequest.InputStream.CanSeek) || this._fileTransporterRequest.ContentLength == -1) { await UploadUnseekableStreamAsync(this._fileTransporterRequest, cancellationToken).ConfigureAwait(false); } else { - var initRequest = ConstructInitiateMultipartUploadRequest(); - var initResponse = await _s3Client.InitiateMultipartUploadAsync(initRequest, cancellationToken) + InitiateMultipartUploadResponse initResponse = null; + try + { + var initRequest = ConstructInitiateMultipartUploadRequest(); + initResponse = await _s3Client.InitiateMultipartUploadAsync(initRequest, cancellationToken) .ConfigureAwait(continueOnCapturedContext: false); - Logger.DebugFormat("Initiated upload: {0}", initResponse.UploadId); + 
Logger.DebugFormat("Initiated upload: {0}", initResponse.UploadId); + } + catch (Exception) + { + FireTransferFailedEvent(); + throw; + } var pendingUploadPartTasks = new List>(); - SemaphoreSlim localThrottler = null; CancellationTokenSource internalCts = null; + try { Logger.DebugFormat("Queue up the UploadPartRequests to be executed"); @@ -101,14 +113,19 @@ await localThrottler.WaitAsync(cancellationToken) Logger.DebugFormat("Beginning completing multipart. ({0})", initResponse.UploadId); var compRequest = ConstructCompleteMultipartUploadRequest(initResponse); - await this._s3Client.CompleteMultipartUploadAsync(compRequest, cancellationToken) + var completeResponse = await this._s3Client.CompleteMultipartUploadAsync(compRequest, cancellationToken) .ConfigureAwait(continueOnCapturedContext: false); Logger.DebugFormat("Done completing multipart. ({0})", initResponse.UploadId); + var mappedResponse = ResponseMapper.MapCompleteMultipartUploadResponse(completeResponse); + FireTransferCompletedEvent(mappedResponse); } catch (Exception e) { - Logger.Error(e, "Exception while uploading. ({0})", initResponse.UploadId); + Logger.Error(e, "Exception while uploading. ({0})", initResponse?.UploadId ?? "unknown"); + + FireTransferFailedEvent(); + // Can't do async invocation in the catch block, doing cleanup synchronously. Cleanup(initResponse.UploadId, pendingUploadPartTasks); throw; @@ -207,8 +224,19 @@ private void AbortMultipartUpload(string uploadId) } }; - var initiateRequest = ConstructInitiateMultipartUploadRequest(requestEventHandler); - var initiateResponse = await _s3Client.InitiateMultipartUploadAsync(initiateRequest, cancellationToken).ConfigureAwait(false); + InitiateMultipartUploadResponse initiateResponse = null; + + try + { + var initiateRequest = ConstructInitiateMultipartUploadRequest(requestEventHandler); + initiateResponse = await _s3Client.InitiateMultipartUploadAsync(initiateRequest, cancellationToken).ConfigureAwait(false); + } + catch (Exception ex) + { + FireTransferFailedEvent(); + Logger.Error(ex, "Failed to initiate multipart upload for unseekable stream"); + throw; + } try { @@ -276,12 +304,17 @@ private void AbortMultipartUpload(string uploadId) this._uploadResponses = uploadPartResponses; CompleteMultipartUploadRequest compRequest = ConstructCompleteMultipartUploadRequest(initiateResponse, true, requestEventHandler); - await _s3Client.CompleteMultipartUploadAsync(compRequest, cancellationToken).ConfigureAwait(false); + var completeResponse = await _s3Client.CompleteMultipartUploadAsync(compRequest, cancellationToken).ConfigureAwait(false); Logger.DebugFormat("Completed multi part upload. 
(Part count: {0}, Upload Id: {1})", uploadPartResponses.Count, initiateResponse.UploadId); + + var mappedResponse = ResponseMapper.MapCompleteMultipartUploadResponse(completeResponse); + FireTransferCompletedEvent(mappedResponse); } } catch (Exception ex) { + FireTransferFailedEvent(); + await _s3Client.AbortMultipartUploadAsync(new AbortMultipartUploadRequest() { BucketName = request.BucketName, diff --git a/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs b/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs index 427b863e95ed..417d16098644 100644 --- a/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs +++ b/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs @@ -770,6 +770,210 @@ public void MultipartUploadProgressTest() } } + [TestMethod] + [TestCategory("S3")] + public void MultipartUploadInitiatedEventTest() + { + var fileName = UtilityMethods.GenerateName(@"MultipartUploadTest\InitiatedEvent"); + var eventValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.IsTrue(args.TotalBytes > 0); + Assert.AreEqual(20 * MEG_SIZE, args.TotalBytes); + Assert.AreEqual(args.FilePath, Path.Combine(BasePath, fileName)); + } + }; + // Use 20MB+ to trigger multipart upload + UploadWithLifecycleEvents(fileName, 20 * MEG_SIZE, eventValidator, null, null); + eventValidator.AssertEventFired(); + } + + [TestMethod] + [TestCategory("S3")] + public void MultipartUploadCompletedEventTest() + { + var fileName = UtilityMethods.GenerateName(@"MultipartUploadTest\CompletedEvent"); + var eventValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.IsNotNull(args.Response); + Assert.AreEqual(args.TransferredBytes, args.TotalBytes); + Assert.AreEqual(25 * MEG_SIZE, args.TotalBytes); + Assert.IsTrue(!string.IsNullOrEmpty(args.Response.ETag)); + Assert.AreEqual(args.FilePath, Path.Combine(BasePath, fileName)); + } + }; + // Use 25MB to trigger multipart upload + UploadWithLifecycleEvents(fileName, 25 * MEG_SIZE, null, eventValidator, null); + eventValidator.AssertEventFired(); + } + + [TestMethod] + [TestCategory("S3")] + public void MultipartUploadFailedEventTest() + { + var fileName = UtilityMethods.GenerateName(@"MultipartUploadTest\FailedEvent"); + var eventValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.IsTrue(args.TotalBytes > 0); + Assert.AreEqual(22 * MEG_SIZE, args.TotalBytes); + Assert.AreEqual(args.FilePath, Path.Combine(BasePath, fileName)); + // For failed uploads, transferred bytes should be less than or equal to total bytes + Assert.IsTrue(args.TransferredBytes <= args.TotalBytes); + } + }; + + // Use invalid bucket name to force failure with multipart upload size + var invalidBucketName = "invalid-bucket-name-" + Guid.NewGuid().ToString(); + + try + { + // Use 22MB to trigger multipart upload + UploadWithLifecycleEventsAndBucket(fileName, 22 * MEG_SIZE, invalidBucketName, null, null, eventValidator); + Assert.Fail("Expected an exception to be thrown for invalid bucket"); + } + catch (AmazonS3Exception) + { + // Expected exception - the failed event should have been fired + eventValidator.AssertEventFired(); + } + } + + [TestMethod] + [TestCategory("S3")] + public void MultipartUploadCompleteLifecycleTest() + { + var fileName = UtilityMethods.GenerateName(@"MultipartUploadTest\CompleteLifecycle"); + + var initiatedValidator = new 
TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.AreEqual(30 * MEG_SIZE, args.TotalBytes); + Assert.AreEqual(args.FilePath, Path.Combine(BasePath, fileName)); + } + }; + + var completedValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.IsNotNull(args.Response); + Assert.AreEqual(args.TransferredBytes, args.TotalBytes); + Assert.AreEqual(30 * MEG_SIZE, args.TotalBytes); + Assert.AreEqual(args.FilePath, Path.Combine(BasePath, fileName)); + } + }; + + // Use 30MB to trigger multipart upload + UploadWithLifecycleEvents(fileName, 30 * MEG_SIZE, initiatedValidator, completedValidator, null); + + initiatedValidator.AssertEventFired(); + completedValidator.AssertEventFired(); + } + + [TestMethod] + [TestCategory("S3")] + public void MultipartUploadUnseekableStreamInitiatedEventTest() + { + var fileName = UtilityMethods.GenerateName(@"MultipartUploadTest\UnseekableStreamInitiatedEvent"); + var eventValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.AreEqual(-1, args.TotalBytes); // Unseekable streams have unknown length + } + }; + UploadUnseekableStreamWithLifecycleEvents(20 * MEG_SIZE, eventValidator, null, null); + eventValidator.AssertEventFired(); + } + + [TestMethod] + [TestCategory("S3")] + public void MultipartUploadUnseekableStreamCompletedEventTest() + { + var eventValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.IsNotNull(args.Response); + Assert.AreEqual(-1, args.TotalBytes); // Unseekable streams have unknown length + Assert.AreEqual(20 * MEG_SIZE, args.TransferredBytes); // since we know the actual length via testing it, we can check the transferredbytes size + } + }; + UploadUnseekableStreamWithLifecycleEvents(20 * MEG_SIZE, null, eventValidator, null); + eventValidator.AssertEventFired(); + } + + [TestMethod] + [TestCategory("S3")] + public void MultipartUploadUnseekableStreamFailedEventTest() + { + var eventValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.AreEqual(-1, args.TotalBytes); // Unseekable streams have unknown length + } + }; + + // Use invalid bucket name to force failure with multipart upload size + var invalidBucketName = "invalid-bucket-name-" + Guid.NewGuid().ToString(); + + try + { + UploadUnseekableStreamWithLifecycleEventsAndBucket(20 * MEG_SIZE, invalidBucketName, null, null, eventValidator); + Assert.Fail("Expected an exception to be thrown for invalid bucket"); + } + catch (AmazonS3Exception) + { + // Expected exception - the failed event should have been fired + eventValidator.AssertEventFired(); + } + } + + [TestMethod] + [TestCategory("S3")] + public void MultipartUploadUnseekableStreamCompleteLifecycleTest() + { + var initiatedValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.AreEqual(-1, args.TotalBytes); // Unseekable streams have unknown length + } + }; + + var completedValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.IsNotNull(args.Response); + Assert.AreEqual(-1, args.TotalBytes); // Unseekable streams have unknown length + Assert.AreEqual(18 * MEG_SIZE, args.TransferredBytes); // Should have transferred all bytes + } + }; + + UploadUnseekableStreamWithLifecycleEvents(18 * 
MEG_SIZE, initiatedValidator, completedValidator, null);
+
+            initiatedValidator.AssertEventFired();
+            completedValidator.AssertEventFired();
+        }
+
     [TestMethod]
     [TestCategory("S3")]
     public void MultipartGetNumberTest()
@@ -1624,6 +1828,55 @@ void UploadWithLifecycleEventsAndBucket(string fileName, long size, string targe
             transferUtility.Upload(request);
         }
+
+        void UploadUnseekableStreamWithLifecycleEvents(long size,
+            TransferLifecycleEventValidator initiatedValidator,
+            TransferLifecycleEventValidator completedValidator,
+            TransferLifecycleEventValidator failedValidator)
+        {
+            UploadUnseekableStreamWithLifecycleEventsAndBucket(size, bucketName, initiatedValidator, completedValidator, failedValidator);
+        }
+
+        void UploadUnseekableStreamWithLifecycleEventsAndBucket(long size, string targetBucketName,
+            TransferLifecycleEventValidator initiatedValidator,
+            TransferLifecycleEventValidator completedValidator,
+            TransferLifecycleEventValidator failedValidator)
+        {
+            var fileName = UtilityMethods.GenerateName(@"UnseekableStreamUpload\File");
+            var key = fileName;
+            var path = Path.Combine(BasePath, fileName);
+            UtilityMethods.GenerateFile(path, size);
+
+            // Convert file to unseekable stream
+            var stream = GenerateUnseekableStreamFromFile(path);
+
+            var config = new TransferUtilityConfig();
+            var transferUtility = new TransferUtility(Client, config);
+            var request = new TransferUtilityUploadRequest
+            {
+                BucketName = targetBucketName,
+                InputStream = stream,
+                Key = key,
+                ContentType = octetStreamContentType
+            };
+
+            if (initiatedValidator != null)
+            {
+                request.UploadInitiatedEvent += initiatedValidator.OnEventFired;
+            }
+
+            if (completedValidator != null)
+            {
+                request.UploadCompletedEvent += completedValidator.OnEventFired;
+            }
+
+            if (failedValidator != null)
+            {
+                request.UploadFailedEvent += failedValidator.OnEventFired;
+            }
+
+            transferUtility.Upload(request);
+        }
     private class UnseekableStream : MemoryStream
     {
         private readonly bool _setZeroLengthStream;

From 1883db91f79214204df93ca7ecdb9f8ee23ee171 Mon Sep 17 00:00:00 2001
From: Garrett Beatty
Date: Thu, 23 Oct 2025 16:40:54 -0400
Subject: [PATCH 08/56] fix silently failing test (#4073)

---
 .../Services/S3/IntegrationTests/TransferUtilityTests.cs | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs b/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs
index 417d16098644..6aaa7ae3a5e3 100644
--- a/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs
+++ b/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs
@@ -909,7 +909,7 @@ public void MultipartUploadUnseekableStreamCompletedEventTest()
                 Assert.IsNotNull(args.Request);
                 Assert.IsNotNull(args.Response);
                 Assert.AreEqual(-1, args.TotalBytes); // Unseekable streams have unknown length
-                Assert.AreEqual(20 * MEG_SIZE, args.TransferredBytes); // since we know the actual length via testing it, we can check the transferredbytes size
+                Assert.AreEqual(0, args.TransferredBytes); // for unseekable streams we don't attach any progress listeners, so TransferredBytes stays 0.
             }
         };
         UploadUnseekableStreamWithLifecycleEvents(20 * MEG_SIZE, null, eventValidator, null);
@@ -964,7 +964,7 @@ public void MultipartUploadUnseekableStreamCompleteLifecycleTest()
                 Assert.IsNotNull(args.Request);
                 Assert.IsNotNull(args.Response);
                 Assert.AreEqual(-1, args.TotalBytes); // Unseekable streams have unknown length
-                Assert.AreEqual(18 * MEG_SIZE, args.TransferredBytes); // Should have transferred all bytes
+                Assert.AreEqual(0, args.TransferredBytes); // for unseekable streams we don't attach any progress listeners, so TransferredBytes stays 0.
             }
         };
@@ -1758,15 +1758,16 @@ public void OnEventFired(object sender, T eventArgs)
         {
             try
             {
-                EventFired = true;
                 Console.WriteLine("Lifecycle Event Fired: {0}", typeof(T).Name);
                 Validate?.Invoke(eventArgs);
+                EventFired = true; // Only set if validation passes
             }
             catch (Exception ex)
             {
                 EventException = ex;
+                EventFired = false; // Ensure we don't mark as fired on failure
                 Console.WriteLine("Exception caught in lifecycle event: {0}", ex.Message);
-                throw;
+                // Don't re-throw, let AssertEventFired() handle it
             }
         }

From d1b3919f7b62b70fee4150645213bce2db74a3ce Mon Sep 17 00:00:00 2001
From: Garrett Beatty
Date: Fri, 24 Oct 2025 18:21:15 -0400
Subject: [PATCH 09/56] Add ContentLanguage to header collection of GetObjectResponse. (#4074)

---
 .../c49077d9-90b3-437f-b316-6d8d8833ae72.json | 11 +++++
 .../GetObjectResponseUnmarshaller.cs          |  1 +
 .../S3/IntegrationTests/GetObjectTests.cs     | 45 +++++++++++++++++++
 3 files changed, 57 insertions(+)
 create mode 100644 generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae72.json

diff --git a/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae72.json b/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae72.json
new file mode 100644
index 000000000000..ee368d5cf126
--- /dev/null
+++ b/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae72.json
@@ -0,0 +1,11 @@
+{
+  "services": [
+    {
+      "serviceName": "S3",
+      "type": "patch",
+      "changeLogMessages": [
+        "Add ContentLanguage to header collection of GetObjectResponse."
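

As a rough sketch of the round trip this entry describes (the client variable and bucket/key names are illustrative assumptions; the integration test later in this patch exercises the same behavior):

    var putRequest = new PutObjectRequest
    {
        BucketName = "amzn-s3-demo-bucket", // hypothetical bucket
        Key = "greeting.txt",
        ContentBody = "Hello"
    };
    putRequest.Headers["Content-Language"] = "en-US";
    client.PutObject(putRequest);

    using (var getResponse = client.GetObject("amzn-s3-demo-bucket", "greeting.txt"))
    {
        // Previously only the top-level property was unmarshalled;
        // the headers collection now carries the same value.
        Console.WriteLine(getResponse.ContentLanguage);         // "en-US"
        Console.WriteLine(getResponse.Headers.ContentLanguage); // "en-US"
    }
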
+ ] + } + ] +} diff --git a/sdk/src/Services/S3/Custom/Model/Internal/MarshallTransformations/GetObjectResponseUnmarshaller.cs b/sdk/src/Services/S3/Custom/Model/Internal/MarshallTransformations/GetObjectResponseUnmarshaller.cs index 410f93fd4342..171aebac022e 100644 --- a/sdk/src/Services/S3/Custom/Model/Internal/MarshallTransformations/GetObjectResponseUnmarshaller.cs +++ b/sdk/src/Services/S3/Custom/Model/Internal/MarshallTransformations/GetObjectResponseUnmarshaller.cs @@ -87,6 +87,7 @@ private static void UnmarshallResult(XmlUnmarshallerContext context,GetObjectRes response.Headers.ContentEncoding = S3Transforms.ToString(responseData.GetHeaderValue("Content-Encoding")); if (responseData.IsHeaderPresent("Content-Language")) response.ContentLanguage = S3Transforms.ToString(responseData.GetHeaderValue("Content-Language")); + response.Headers.ContentLanguage = S3Transforms.ToString(responseData.GetHeaderValue("Content-Language")); if (responseData.IsHeaderPresent("Content-Length")) response.Headers.ContentLength = long.Parse(responseData.GetHeaderValue("Content-Length"), CultureInfo.InvariantCulture); if (responseData.IsHeaderPresent("x-amz-object-lock-legal-hold")) diff --git a/sdk/test/Services/S3/IntegrationTests/GetObjectTests.cs b/sdk/test/Services/S3/IntegrationTests/GetObjectTests.cs index 6cba8fc6b989..f4c8c103e126 100644 --- a/sdk/test/Services/S3/IntegrationTests/GetObjectTests.cs +++ b/sdk/test/Services/S3/IntegrationTests/GetObjectTests.cs @@ -246,5 +246,50 @@ public void TestContentLanguageResponseHeaderOverride() "Original ContentLanguage should still be stored when no override is specified"); } } + + [TestMethod] + [TestCategory("S3")] + public void TestContentLanguageHeadersCollection() + { + var key = "TestContentLanguageHeadersCollection"; + var expectedLanguage = "de-DE"; + + // Put object with Content-Language header + var putRequest = new PutObjectRequest + { + BucketName = bucketName, + Key = key, + ContentBody = "Test content for Content-Language headers collection" + }; + putRequest.Headers["Content-Language"] = expectedLanguage; + + Client.PutObject(putRequest); + + // Get object and verify both ContentLanguage properties are set + var response = Client.GetObject(new GetObjectRequest + { + BucketName = bucketName, + Key = key + }); + + using (response) + { + // Verify the direct ContentLanguage property + Assert.IsNotNull(response.ContentLanguage, + "ContentLanguage property should not be null"); + Assert.AreEqual(expectedLanguage, response.ContentLanguage, + "ContentLanguage property should match the value set during PutObject"); + + // Verify the Headers.ContentLanguage property + Assert.IsNotNull(response.Headers.ContentLanguage, + "Headers.ContentLanguage property should not be null"); + Assert.AreEqual(expectedLanguage, response.Headers.ContentLanguage, + "Headers.ContentLanguage property should match the value set during PutObject"); + + // Verify both properties have the same value + Assert.AreEqual(response.ContentLanguage, response.Headers.ContentLanguage, + "ContentLanguage and Headers.ContentLanguage should have the same value"); + } + } } } From 5afd0e6626ef7e6c018a2552570fa5d2c054e890 Mon Sep 17 00:00:00 2001 From: Garrett Beatty Date: Tue, 28 Oct 2025 09:06:44 -0400 Subject: [PATCH 10/56] ignore isset (#4082) --- .../Transfer/Internal/ResponseMapper.cs | 114 +++++------------- 1 file changed, 29 insertions(+), 85 deletions(-) diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/ResponseMapper.cs 
b/sdk/src/Services/S3/Custom/Transfer/Internal/ResponseMapper.cs index 7e8505ecbf69..f31afc9bc4ed 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/ResponseMapper.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/ResponseMapper.cs @@ -44,53 +44,22 @@ internal static TransferUtilityUploadResponse MapPutObjectResponse(PutObjectResp var response = new TransferUtilityUploadResponse(); // Map all fields as defined in mapping.json "Conversion" -> "PutObjectResponse" -> "UploadResponse" - if (source.IsSetBucketKeyEnabled()) - response.BucketKeyEnabled = source.BucketKeyEnabled.GetValueOrDefault(); - - if (source.IsSetChecksumCRC32()) - response.ChecksumCRC32 = source.ChecksumCRC32; - - if (source.IsSetChecksumCRC32C()) - response.ChecksumCRC32C = source.ChecksumCRC32C; - - if (source.IsSetChecksumCRC64NVME()) - response.ChecksumCRC64NVME = source.ChecksumCRC64NVME; - - if (source.IsSetChecksumSHA1()) - response.ChecksumSHA1 = source.ChecksumSHA1; - - if (source.IsSetChecksumSHA256()) - response.ChecksumSHA256 = source.ChecksumSHA256; - - if (source.IsSetChecksumType()) - response.ChecksumType = source.ChecksumType; - - if (source.IsSetETag()) - response.ETag = source.ETag; - - if (source.Expiration != null) - response.Expiration = source.Expiration; - - if (source.IsSetRequestCharged()) - response.RequestCharged = source.RequestCharged; - - if (source.ServerSideEncryptionCustomerMethod != null) - response.ServerSideEncryptionCustomerMethod = source.ServerSideEncryptionCustomerMethod; - - if (source.ServerSideEncryptionCustomerProvidedKeyMD5 != null) - response.ServerSideEncryptionCustomerProvidedKeyMD5 = source.ServerSideEncryptionCustomerProvidedKeyMD5; - - if (source.ServerSideEncryptionKeyManagementServiceEncryptionContext != null) - response.ServerSideEncryptionKeyManagementServiceEncryptionContext = source.ServerSideEncryptionKeyManagementServiceEncryptionContext; - - if (source.IsSetServerSideEncryptionKeyManagementServiceKeyId()) - response.ServerSideEncryptionKeyManagementServiceKeyId = source.ServerSideEncryptionKeyManagementServiceKeyId; - - if (source.ServerSideEncryptionMethod != null) - response.ServerSideEncryptionMethod = source.ServerSideEncryptionMethod; - - if (source.IsSetVersionId()) - response.VersionId = source.VersionId; + response.BucketKeyEnabled = source.BucketKeyEnabled.GetValueOrDefault(); + response.ChecksumCRC32 = source.ChecksumCRC32; + response.ChecksumCRC32C = source.ChecksumCRC32C; + response.ChecksumCRC64NVME = source.ChecksumCRC64NVME; + response.ChecksumSHA1 = source.ChecksumSHA1; + response.ChecksumSHA256 = source.ChecksumSHA256; + response.ChecksumType = source.ChecksumType; + response.ETag = source.ETag; + response.Expiration = source.Expiration; + response.RequestCharged = source.RequestCharged; + response.ServerSideEncryptionCustomerMethod = source.ServerSideEncryptionCustomerMethod; + response.ServerSideEncryptionCustomerProvidedKeyMD5 = source.ServerSideEncryptionCustomerProvidedKeyMD5; + response.ServerSideEncryptionKeyManagementServiceEncryptionContext = source.ServerSideEncryptionKeyManagementServiceEncryptionContext; + response.ServerSideEncryptionKeyManagementServiceKeyId = source.ServerSideEncryptionKeyManagementServiceKeyId; + response.ServerSideEncryptionMethod = source.ServerSideEncryptionMethod; + response.VersionId = source.VersionId; // Copy response metadata response.ResponseMetadata = source.ResponseMetadata; @@ -114,44 +83,19 @@ internal static TransferUtilityUploadResponse MapCompleteMultipartUploadResponse var response = new 
TransferUtilityUploadResponse(); // Map all fields as defined in mapping.json "Conversion" -> "CompleteMultipartResponse" -> "UploadResponse" - if (source.IsSetBucketKeyEnabled()) - response.BucketKeyEnabled = source.BucketKeyEnabled.GetValueOrDefault(); - - if (source.IsSetChecksumCRC32()) - response.ChecksumCRC32 = source.ChecksumCRC32; - - if (source.IsSetChecksumCRC32C()) - response.ChecksumCRC32C = source.ChecksumCRC32C; - - if (source.IsSetChecksumCRC64NVME()) - response.ChecksumCRC64NVME = source.ChecksumCRC64NVME; - - if (source.IsSetChecksumSHA1()) - response.ChecksumSHA1 = source.ChecksumSHA1; - - if (source.IsSetChecksumSHA256()) - response.ChecksumSHA256 = source.ChecksumSHA256; - - if (source.ChecksumType != null) - response.ChecksumType = source.ChecksumType; - - if (source.IsSetETag()) - response.ETag = source.ETag; - - if (source.Expiration != null) - response.Expiration = source.Expiration; - - if (source.IsSetRequestCharged()) - response.RequestCharged = source.RequestCharged; - - if (source.ServerSideEncryptionMethod != null) - response.ServerSideEncryptionMethod = source.ServerSideEncryptionMethod; - - if (source.IsSetServerSideEncryptionKeyManagementServiceKeyId()) - response.ServerSideEncryptionKeyManagementServiceKeyId = source.ServerSideEncryptionKeyManagementServiceKeyId; - - if (source.IsSetVersionId()) - response.VersionId = source.VersionId; + response.BucketKeyEnabled = source.BucketKeyEnabled.GetValueOrDefault(); + response.ChecksumCRC32 = source.ChecksumCRC32; + response.ChecksumCRC32C = source.ChecksumCRC32C; + response.ChecksumCRC64NVME = source.ChecksumCRC64NVME; + response.ChecksumSHA1 = source.ChecksumSHA1; + response.ChecksumSHA256 = source.ChecksumSHA256; + response.ChecksumType = source.ChecksumType; + response.ETag = source.ETag; + response.Expiration = source.Expiration; + response.RequestCharged = source.RequestCharged; + response.ServerSideEncryptionMethod = source.ServerSideEncryptionMethod; + response.ServerSideEncryptionKeyManagementServiceKeyId = source.ServerSideEncryptionKeyManagementServiceKeyId; + response.VersionId = source.VersionId; // Copy response metadata response.ResponseMetadata = source.ResponseMetadata; From a62545e7d60f3ac072d2013e1111d328bfe901ef Mon Sep 17 00:00:00 2001 From: Garrett Beatty Date: Wed, 29 Oct 2025 10:11:23 -0400 Subject: [PATCH 11/56] Add DownloadResponse mapping (#4075) --- .../7f23582e-3225-487b-83e7-167cf17cb231.json | 11 + .../Transfer/Internal/ResponseMapper.cs | 57 ++++ .../TransferUtilityDownloadResponse.cs | 300 ++++++++++++++++++ .../EmbeddedResource/property-aliases.json | 14 + .../UnitTests/Custom/ResponseMapperTests.cs | 103 +++++- 5 files changed, 470 insertions(+), 15 deletions(-) create mode 100644 generator/.DevConfigs/7f23582e-3225-487b-83e7-167cf17cb231.json create mode 100644 sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadResponse.cs diff --git a/generator/.DevConfigs/7f23582e-3225-487b-83e7-167cf17cb231.json b/generator/.DevConfigs/7f23582e-3225-487b-83e7-167cf17cb231.json new file mode 100644 index 000000000000..564bb1cd65d5 --- /dev/null +++ b/generator/.DevConfigs/7f23582e-3225-487b-83e7-167cf17cb231.json @@ -0,0 +1,11 @@ +{ + "services": [ + { + "serviceName": "S3", + "type": "patch", + "changeLogMessages": [ + "Add GetObjectResponse to TransferUtilityDownloadResponse mapping." 
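

A minimal sketch of the mapping this entry adds (the mapper is internal to the Transfer Utility, as the diff that follows shows; the stand-in response values here are illustrative):

    // A stand-in response; in practice this comes from the S3 client during a download.
    var getObjectResponse = new GetObjectResponse { ETag = "\"etag-value\"", VersionId = "v1" };

    // Copy the service response onto the Transfer Utility's public response type.
    TransferUtilityDownloadResponse downloadResponse =
        ResponseMapper.MapGetObjectResponse(getObjectResponse);

    // Callers can then inspect download metadata without holding the raw service response.
    Console.WriteLine(downloadResponse.ETag);      // "etag-value"
    Console.WriteLine(downloadResponse.VersionId); // v1
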
+ ] + } + ] +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/ResponseMapper.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/ResponseMapper.cs index f31afc9bc4ed..1e337635db81 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/ResponseMapper.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/ResponseMapper.cs @@ -20,6 +20,7 @@ * */ +using System.Collections.Generic; using Amazon.S3.Model; namespace Amazon.S3.Transfer.Internal @@ -104,6 +105,62 @@ internal static TransferUtilityUploadResponse MapCompleteMultipartUploadResponse return response; } + + /// + /// Maps a GetObjectResponse to TransferUtilityDownloadResponse. + /// Uses the field mappings defined in mapping.json "Conversion" -> "GetObjectResponse" -> "DownloadResponse". + /// + /// The GetObjectResponse to map from + /// A new TransferUtilityDownloadResponse with mapped fields + internal static TransferUtilityDownloadResponse MapGetObjectResponse(GetObjectResponse source) + { + if (source == null) + return null; + + var response = new TransferUtilityDownloadResponse(); + + // Map all fields as defined in mapping.json "Conversion" -> "GetObjectResponse" -> "DownloadResponse" + response.AcceptRanges = source.AcceptRanges; + response.BucketKeyEnabled = source.BucketKeyEnabled.GetValueOrDefault(); + response.ChecksumCRC32 = source.ChecksumCRC32; + response.ChecksumCRC32C = source.ChecksumCRC32C; + response.ChecksumCRC64NVME = source.ChecksumCRC64NVME; + response.ChecksumSHA1 = source.ChecksumSHA1; + response.ChecksumSHA256 = source.ChecksumSHA256; + response.ChecksumType = source.ChecksumType; + response.ContentRange = source.ContentRange; + response.Headers = source.Headers; + response.DeleteMarker = source.DeleteMarker; + response.ETag = source.ETag; + response.Expiration = source.Expiration; + response.ExpiresString = source.ExpiresString; + response.LastModified = source.LastModified; + response.Metadata = source.Metadata; + response.MissingMeta = source.MissingMeta; + response.ObjectLockLegalHoldStatus = source.ObjectLockLegalHoldStatus; + response.ObjectLockMode = source.ObjectLockMode; + response.ObjectLockRetainUntilDate = source.ObjectLockRetainUntilDate; + response.PartsCount = source.PartsCount; + response.ReplicationStatus = source.ReplicationStatus; + response.RequestCharged = source.RequestCharged; + response.RestoreExpiration = source.RestoreExpiration; + response.RestoreInProgress = source.RestoreInProgress; + response.ServerSideEncryptionCustomerMethod = source.ServerSideEncryptionCustomerMethod; + response.ServerSideEncryptionCustomerProvidedKeyMD5 = source.ServerSideEncryptionCustomerProvidedKeyMD5; + response.ServerSideEncryptionKeyManagementServiceKeyId = source.ServerSideEncryptionKeyManagementServiceKeyId; + response.ServerSideEncryptionMethod = source.ServerSideEncryptionMethod; + response.StorageClass = source.StorageClass; + response.TagCount = source.TagCount; + response.VersionId = source.VersionId; + response.WebsiteRedirectLocation = source.WebsiteRedirectLocation; + + // Copy response metadata + response.ResponseMetadata = source.ResponseMetadata; + response.ContentLength = source.ContentLength; + response.HttpStatusCode = source.HttpStatusCode; + + return response; + } } } diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadResponse.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadResponse.cs new file mode 100644 index 000000000000..d10c72f47c0f --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadResponse.cs @@ -0,0 
+1,300 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ + +using System; +using System.Collections.Generic; +using Amazon.Runtime; +using Amazon.S3.Model; + +namespace Amazon.S3.Transfer +{ + /// + /// Response object for Transfer Utility download operations. + /// Contains response metadata from download operations. + /// + public class TransferUtilityDownloadResponse : AmazonWebServiceResponse + { + /// + /// Gets and sets the AcceptRanges property. + /// + public string AcceptRanges { get; set; } + + /// + /// Gets and sets the property BucketKeyEnabled. + /// + /// Indicates whether the object uses an S3 Bucket Key for server-side encryption with + /// Amazon Web Services KMS (SSE-KMS). + /// + /// + public bool? BucketKeyEnabled { get; set; } + + /// + /// The collection of headers for the response. + /// + public HeadersCollection Headers { get; set; } + + /// + /// Gets and sets the property ChecksumCRC32. + /// + /// The Base64 encoded, 32-bit CRC-32 checksum of the object. + /// + /// + public string ChecksumCRC32 { get; set; } + + /// + /// Gets and sets the property ChecksumCRC32C. + /// + /// The Base64 encoded, 32-bit CRC-32C checksum of the object. + /// + /// + public string ChecksumCRC32C { get; set; } + + /// + /// Gets and sets the property ChecksumCRC64NVME. + /// + /// The Base64 encoded, 64-bit CRC-64NVME checksum of the object. + /// + /// + public string ChecksumCRC64NVME { get; set; } + + /// + /// Gets and sets the property ChecksumSHA1. + /// + /// The Base64 encoded, 160-bit SHA-1 digest of the object. + /// + /// + public string ChecksumSHA1 { get; set; } + + /// + /// Gets and sets the property ChecksumSHA256. + /// + /// The Base64 encoded, 256-bit SHA-256 checksum of the object. + /// + /// + public string ChecksumSHA256 { get; set; } + + /// + /// Gets and sets the property ChecksumType. + /// + /// The checksum type used to calculate the object-level checksum. + /// + /// + public ChecksumType ChecksumType { get; set; } + + /// + /// Gets and sets the ContentRange property. + /// + public string ContentRange { get; set; } + + /// + /// Gets and sets the DeleteMarker property. + /// + /// Specifies whether the object retrieved was (true) or was not (false) a Delete Marker. + /// + /// + public string DeleteMarker { get; set; } + + /// + /// Gets and sets the ETag property. + /// + /// An ETag is an opaque identifier assigned by a web server to a specific version of a resource found at a URL. + /// + /// + public string ETag { get; set; } + + /// + /// Gets and sets the property Expiration. + /// + /// If the object expiration is configured, this will contain the expiration date and rule ID. 
+ /// + /// + public Expiration Expiration { get; set; } + + /// + /// Gets and sets the ExpiresString property. + /// + /// The date and time at which the object is no longer cacheable (string format). + /// + /// + public string ExpiresString { get; set; } + + /// + /// Gets and sets the property LastModified. + /// + /// Date and time when the object was last modified. + /// + /// + public DateTime? LastModified { get; set; } + + /// + /// Gets and sets the Metadata property. + /// + /// The collection of metadata for the object. + /// + /// + public MetadataCollection Metadata { get; set; } + + /// + /// Gets and sets the property MissingMeta. + /// + /// This is set to the number of metadata entries not returned in the headers that are + /// prefixed with x-amz-meta-. + /// + /// + public int? MissingMeta { get; set; } + + /// + /// Gets and sets the property ObjectLockLegalHoldStatus. + /// + /// Indicates whether this object has an active legal hold. + /// + /// + public ObjectLockLegalHoldStatus ObjectLockLegalHoldStatus { get; set; } + + /// + /// Gets and sets the property ObjectLockMode. + /// + /// The Object Lock mode that's currently in place for this object. + /// + /// + public ObjectLockMode ObjectLockMode { get; set; } + + /// + /// Gets and sets the property ObjectLockRetainUntilDate. + /// + /// The date and time when this object's Object Lock will expire. + /// + /// + public DateTime? ObjectLockRetainUntilDate { get; set; } + + /// + /// Gets and sets the PartsCount property. + /// + /// The number of parts this object has. + /// + /// + public int? PartsCount { get; set; } + + /// + /// Gets and sets the property ReplicationStatus. + /// + /// Amazon S3 can return this if your request involves a bucket that is either a source + /// or destination in a replication rule. + /// + /// + public ReplicationStatus ReplicationStatus { get; set; } + + /// + /// Gets and sets the RequestCharged property. + /// + /// If present, indicates that the requester was successfully charged for the request. + /// + /// + public RequestCharged RequestCharged { get; set; } + + /// + /// Gets and sets the RestoreExpiration property. + /// + /// RestoreExpiration will be set for objects that have been restored from Amazon Glacier. + /// It indicates for those objects how long the restored object will exist. + /// + /// + public DateTime? RestoreExpiration { get; set; } + + /// + /// Gets and sets the RestoreInProgress + /// + /// Will be true when the object is in the process of being restored from Amazon Glacier. + /// + /// + /// This functionality is not supported for directory buckets. + /// Only the S3 Express One Zone storage class is supported by directory buckets to store objects. + /// + /// + public bool? RestoreInProgress { get; set; } + + /// + /// Gets and sets the ServerSideEncryptionCustomerMethod property. + /// + /// The server-side encryption algorithm to be used with the customer provided key. + /// + /// + public ServerSideEncryptionCustomerMethod ServerSideEncryptionCustomerMethod { get; set; } + + /// + /// Gets and sets the ServerSideEncryptionCustomerProvidedKeyMD5 property. + /// + /// The MD5 server-side encryption of the customer-provided encryption key. + /// + /// + public string ServerSideEncryptionCustomerProvidedKeyMD5 { get; set; } + + /// + /// Gets and sets the ServerSideEncryptionKeyManagementServiceKeyId property. + /// + /// If present, indicates the ID of the KMS key that was used for object encryption. 
+ /// + /// + public string ServerSideEncryptionKeyManagementServiceKeyId { get; set; } + + /// + /// Gets and sets the ServerSideEncryptionMethod property. + /// + /// The server-side encryption algorithm used when you store this object in Amazon S3. + /// + /// + public ServerSideEncryptionMethod ServerSideEncryptionMethod { get; set; } + + /// + /// Gets and sets the property StorageClass. + /// + /// Provides storage class information of the object. + /// + /// + public S3StorageClass StorageClass { get; set; } + + /// + /// Gets and sets the property TagCount. + /// + /// The number of tags, if any, on the object. + /// + /// + public int TagCount { get; set; } + + /// + /// Gets and sets the property VersionId. + /// + /// Version ID of the object. + /// + /// + public string VersionId { get; set; } + + /// + /// Gets and sets the property WebsiteRedirectLocation. + /// + /// If the bucket is configured as a website, redirects requests for this object to another + /// object in the same bucket or to an external URL. + /// + /// + public string WebsiteRedirectLocation { get; set; } + } +} diff --git a/sdk/test/Services/S3/UnitTests/Custom/EmbeddedResource/property-aliases.json b/sdk/test/Services/S3/UnitTests/Custom/EmbeddedResource/property-aliases.json index 97a29b7695c3..dd4e8c3ce2d7 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/EmbeddedResource/property-aliases.json +++ b/sdk/test/Services/S3/UnitTests/Custom/EmbeddedResource/property-aliases.json @@ -116,6 +116,20 @@ "CompleteMultipartUploadResponse": { "ServerSideEncryption": "ServerSideEncryptionMethod", "SSEKMSKeyId": "ServerSideEncryptionKeyManagementServiceKeyId" + }, + "GetObjectResponse": { + "SSECustomerAlgorithm": "ServerSideEncryptionCustomerMethod", + "SSECustomerKeyMD5": "ServerSideEncryptionCustomerProvidedKeyMD5", + "SSEKMSKeyId": "ServerSideEncryptionKeyManagementServiceKeyId", + "ServerSideEncryption": "ServerSideEncryptionMethod", + "Restore": "RestoreExpiration" + }, + "TransferUtilityDownloadResponse": { + "SSECustomerAlgorithm": "ServerSideEncryptionCustomerMethod", + "SSECustomerKeyMD5": "ServerSideEncryptionCustomerProvidedKeyMD5", + "SSEKMSKeyId": "ServerSideEncryptionKeyManagementServiceKeyId", + "ServerSideEncryption": "ServerSideEncryptionMethod", + "Restore": "RestoreExpiration" } } } \ No newline at end of file diff --git a/sdk/test/Services/S3/UnitTests/Custom/ResponseMapperTests.cs b/sdk/test/Services/S3/UnitTests/Custom/ResponseMapperTests.cs index 18b47e422d4d..ab14986d917f 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/ResponseMapperTests.cs +++ b/sdk/test/Services/S3/UnitTests/Custom/ResponseMapperTests.cs @@ -319,36 +319,59 @@ private void ValidateMappingTransferUtilityAndSdkRequests( + new[] { "Conversion", "GetObjectResponse", "DownloadResponse" }, + (sourceResponse) => + { + return ResponseMapper.MapGetObjectResponse(sourceResponse); + }, + usesHeadersCollection: true, + (sourceResponse) => + { + sourceResponse.HttpStatusCode = HttpStatusCode.OK; + sourceResponse.ContentLength = 2048; + }, + (sourceResponse, targetResponse) => + { + Assert.AreEqual(sourceResponse.HttpStatusCode, targetResponse.HttpStatusCode, "HttpStatusCode should match"); + Assert.AreEqual(sourceResponse.ContentLength, targetResponse.ContentLength, "ContentLength should match"); + }); + } + + [TestMethod] + [TestCategory("S3")] + public void MapGetObjectResponse_NullValues_HandledCorrectly() + { + // Test null handling scenarios + var testCases = new[] + { + // Test null Expiration + new GetObjectResponse { 
Expiration = null }, + + // Test null enum conversions + new GetObjectResponse { ChecksumType = null, RequestCharged = null, ServerSideEncryptionMethod = null } + }; + + foreach (var testCase in testCases) + { + var mapped = ResponseMapper.MapGetObjectResponse(testCase); + Assert.IsNotNull(mapped, "Response should always be mappable"); + + // Test null handling + if (testCase.Expiration == null) + { + Assert.IsNull(mapped.Expiration, "Null Expiration should map to null"); + } + } + } + [TestMethod] [TestCategory("S3")] public void ValidateGetObjectRequestDefinitionCompleteness() From 2b2053103f6c0a4b484171704dc104c01ae567d4 Mon Sep 17 00:00:00 2001 From: Philippe El Asmar <53088140+philasmar@users.noreply.github.com> Date: Wed, 29 Oct 2025 10:27:55 -0400 Subject: [PATCH 12/56] Add additional validation to UploadPartRequests in Transfer Utility (#4083) --- .../49ef8a70-bb30-4cc4-a8b5-92de4f6068c1.json | 12 ++ .../Internal/MultipartUploadCommand.cs | 20 ++- .../_async/MultipartUploadCommand.async.cs | 77 +++++++++- .../Custom/MultipartUploadValidationTests.cs | 133 ++++++++++++++++++ 4 files changed, 237 insertions(+), 5 deletions(-) create mode 100644 generator/.DevConfigs/49ef8a70-bb30-4cc4-a8b5-92de4f6068c1.json create mode 100644 sdk/test/Services/S3/UnitTests/Custom/MultipartUploadValidationTests.cs diff --git a/generator/.DevConfigs/49ef8a70-bb30-4cc4-a8b5-92de4f6068c1.json b/generator/.DevConfigs/49ef8a70-bb30-4cc4-a8b5-92de4f6068c1.json new file mode 100644 index 000000000000..677493821a28 --- /dev/null +++ b/generator/.DevConfigs/49ef8a70-bb30-4cc4-a8b5-92de4f6068c1.json @@ -0,0 +1,12 @@ +{ + "services": [ + { + "serviceName": "S3", + "type": "patch", + "changeLogMessages": [ + "Fixed issue where PartSize and IsLastPart fields were not properly set on Transfer Utility Upload Part Request.", + "Add additional validations for Transfer Utility requests to ensure Upload Parts have the proper Content Length and File Offsets." + ] + } + ] +} \ No newline at end of file diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartUploadCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartUploadCommand.cs index 61ba2db64940..dca8f3f076ac 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartUploadCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartUploadCommand.cs @@ -49,7 +49,6 @@ internal partial class MultipartUploadCommand : BaseCommand long _totalTransferredBytes; Queue _partsToUpload = new Queue(); - long _contentLength; private static Logger Logger { @@ -211,17 +210,30 @@ internal CompleteMultipartUploadRequest ConstructCompleteMultipartUploadRequest( return compRequest; } + private bool calculateIsLastPart(long remainingBytes) + { + var isLastPart = false; + if (remainingBytes <= this._partSize) + isLastPart = true; + return isLastPart; + } + internal UploadPartRequest ConstructUploadPartRequest(int partNumber, long filePosition, InitiateMultipartUploadResponse initiateResponse) { UploadPartRequest uploadPartRequest = ConstructGenericUploadPartRequest(initiateResponse); + // Calculating how many bytes are remaining to be uploaded from the current part. + // This is mainly used for the last part scenario. + var remainingBytes = this._contentLength - filePosition; + // We then check based on the remaining bytes and the content length if this is the last part. 
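

To make the two comments above concrete, a worked example of the same arithmetic with illustrative sizes (a 20 MB upload and an 8 MB part size):

    // part 1: filePosition = 0 MB,  remainingBytes = 20 MB -> not last, PartSize = 8 MB
    // part 2: filePosition = 8 MB,  remainingBytes = 12 MB -> not last, PartSize = 8 MB
    // part 3: filePosition = 16 MB, remainingBytes = 4 MB  -> remainingBytes <= partSize,
    //         so IsLastPart = true and PartSize is trimmed to the 4 MB remainder.
    long contentLength = 20L * 1024 * 1024;
    long partSize = 8L * 1024 * 1024;
    for (long filePosition = 0; filePosition < contentLength; filePosition += partSize)
    {
        long remainingBytes = contentLength - filePosition;
        bool isLastPart = remainingBytes <= partSize;
        long effectivePartSize = isLastPart ? remainingBytes : partSize;
    }
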
+ var isLastPart = calculateIsLastPart(remainingBytes); uploadPartRequest.PartNumber = partNumber; - uploadPartRequest.PartSize = this._partSize; + uploadPartRequest.PartSize = isLastPart ? remainingBytes : this._partSize; + uploadPartRequest.IsLastPart = isLastPart; - if ((filePosition + this._partSize >= this._contentLength) + if (isLastPart && _s3Client is Amazon.S3.Internal.IAmazonS3Encryption) { - uploadPartRequest.IsLastPart = true; uploadPartRequest.PartSize = 0; } diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartUploadCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartUploadCommand.async.cs index 4966c9e012e7..dd747e3d83e8 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartUploadCommand.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartUploadCommand.async.cs @@ -31,6 +31,8 @@ internal partial class MultipartUploadCommand : BaseCommand { public SemaphoreSlim AsyncThrottler { get; set; } + Dictionary _expectedUploadParts = new Dictionary(); + public override async Task ExecuteAsync(CancellationToken cancellationToken) { // Fire transfer initiated event FIRST, before choosing path @@ -69,6 +71,29 @@ public override async Task ExecuteAsync(CancellationToken cancellationToken) cancellationToken.ThrowIfCancellationRequested(); var uploadRequest = ConstructUploadPartRequest(i, filePosition, initResponse); + + var expectedFileOffset = (i - 1) * this._partSize; + // Calculating how many bytes are remaining to be uploaded from the current part. + // This is mainly used for the last part scenario. + var remainingBytes = this._contentLength - expectedFileOffset; + // We then check based on the remaining bytes and the content length if this is the last part. + var isLastPart = calculateIsLastPart(remainingBytes); + // To maintain the same behavior as the ConstructUploadPartRequest. + // We are setting the remainingBytes/partSize when using the IAmazonS3Encryption client to 0. + if (isLastPart + && _s3Client is Amazon.S3.Internal.IAmazonS3Encryption) + { + remainingBytes = 0; + } + this._expectedUploadParts.Add(i, new ExpectedUploadPart { + PartNumber = i, + ExpectedContentLength = + isLastPart ? + remainingBytes : + this._partSize, + ExpectedFileOffset = expectedFileOffset, + IsLastPart = isLastPart + }); this._partsToUpload.Enqueue(uploadRequest); filePosition += this._partSize; } @@ -150,8 +175,50 @@ private async Task UploadPartAsync(UploadPartRequest uploadR { try { - return await _s3Client.UploadPartAsync(uploadRequest, internalCts.Token) + var response = await _s3Client.UploadPartAsync(uploadRequest, internalCts.Token) .ConfigureAwait(continueOnCapturedContext: false); + + if (response.PartNumber is null) + { + throw new ArgumentNullException(nameof(response.PartNumber)); + } + else + { + if (this._expectedUploadParts.TryGetValue((int) response.PartNumber, out var expectedUploadPart)) + { + var actualContentLength = uploadRequest.PartSize; + if (actualContentLength != expectedUploadPart.ExpectedContentLength) + { + throw new InvalidOperationException($"Cannot complete multipart upload request. The expected content length of part {expectedUploadPart.PartNumber} " + + $"does not equal the actual content length."); + } + + if (expectedUploadPart.IsLastPart) + { + if (actualContentLength < 0 || + actualContentLength > expectedUploadPart.ExpectedContentLength) + { + throw new InvalidOperationException($"Cannot complete multipart upload request. 
The last part " +
+                                $"has an invalid content length.");
+                            }
+                        }
+
+                        var actualFileOffset = uploadRequest.FilePosition;
+                        if (uploadRequest.IsSetFilePath() &&
+                            actualFileOffset != expectedUploadPart.ExpectedFileOffset)
+                        {
+                            throw new InvalidOperationException($"Cannot complete multipart upload request. The expected file offset of part {expectedUploadPart.PartNumber} " +
+                                $"does not equal the actual file offset.");
+                        }
+                    }
+                    else
+                    {
+                        throw new InvalidOperationException("Multipart upload request part was unexpected.");
+                    }
+                }
+
+
+                return response;
             }
             catch (Exception exception)
             {
@@ -326,5 +393,13 @@ await _s3Client.AbortMultipartUploadAsync(new AbortMultipartUploadRequest()
                 throw;
             }
         }
+
+        private class ExpectedUploadPart
+        {
+            public int PartNumber { get; set; }
+            public long? ExpectedContentLength { get; set; }
+            public long? ExpectedFileOffset { get; set; }
+            public bool IsLastPart { get; set; }
+        }
     }
 }
diff --git a/sdk/test/Services/S3/UnitTests/Custom/MultipartUploadValidationTests.cs b/sdk/test/Services/S3/UnitTests/Custom/MultipartUploadValidationTests.cs
new file mode 100644
index 000000000000..19ea71304c8f
--- /dev/null
+++ b/sdk/test/Services/S3/UnitTests/Custom/MultipartUploadValidationTests.cs
@@ -0,0 +1,133 @@
+using Amazon.S3;
+using Amazon.S3.Model;
+using Amazon.S3.Transfer;
+using Amazon.S3.Transfer.Internal;
+using Amazon.S3.Util;
+using Microsoft.VisualStudio.TestTools.UnitTesting;
+using Moq;
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Linq;
+using System.Text;
+using System.Threading;
+using System.Threading.Tasks;
+
+namespace AWSSDK.UnitTests
+{
+    [TestClass]
+    public class MultipartUploadValidationTests
+    {
+        private static string _tempFilePath;
+        private const long fileSizeInBytes = 40 * 1024 * 1024;
+
+        [ClassInitialize]
+        public static void ClassInitialize(TestContext context)
+        {
+            _tempFilePath = Path.GetTempFileName();
+
+            CreateFileWithSpecificSize(_tempFilePath, fileSizeInBytes);
+        }
+
+        [ClassCleanup]
+        public static void ClassCleanup()
+        {
+            if (File.Exists(_tempFilePath))
+            {
+                File.Delete(_tempFilePath);
+            }
+        }
+
+        private static void CreateFileWithSpecificSize(string path, long size)
+        {
+            using (var fileStream = new FileStream(path, FileMode.Create, FileAccess.Write))
+            {
+                fileStream.SetLength(size);
+            }
+        }
+
+        [TestMethod]
+        [TestCategory("S3")]
+        public async Task Validation_HappyPath()
+        {
+            var initiateMultipartUploadResponse = new InitiateMultipartUploadResponse
+            {
+                UploadId = "test"
+            };
+
+            var s3Client = new Mock();
+            s3Client
+                .Setup(x => x.InitiateMultipartUploadAsync(
+                    It.IsAny(),
diff --git a/sdk/test/Services/S3/UnitTests/Custom/MultipartUploadValidationTests.cs b/sdk/test/Services/S3/UnitTests/Custom/MultipartUploadValidationTests.cs
new file mode 100644
index 000000000000..19ea71304c8f
--- /dev/null
+++ b/sdk/test/Services/S3/UnitTests/Custom/MultipartUploadValidationTests.cs
@@ -0,0 +1,133 @@
+using Amazon.S3;
+using Amazon.S3.Model;
+using Amazon.S3.Transfer;
+using Amazon.S3.Transfer.Internal;
+using Amazon.S3.Util;
+using Microsoft.VisualStudio.TestTools.UnitTesting;
+using Moq;
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Linq;
+using System.Text;
+using System.Threading;
+using System.Threading.Tasks;
+
+namespace AWSSDK.UnitTests
+{
+    [TestClass]
+    public class MultipartUploadValidationTests
+    {
+        private static string _tempFilePath;
+        private const long fileSizeInBytes = 40 * 1024 * 1024;
+
+        [ClassInitialize]
+        public static void ClassInitialize(TestContext context)
+        {
+            _tempFilePath = Path.GetTempFileName();
+
+            CreateFileWithSpecificSize(_tempFilePath, fileSizeInBytes);
+        }
+
+        [ClassCleanup]
+        public static void ClassCleanup()
+        {
+            if (File.Exists(_tempFilePath))
+            {
+                File.Delete(_tempFilePath);
+            }
+        }
+
+        private static void CreateFileWithSpecificSize(string path, long size)
+        {
+            using (var fileStream = new FileStream(path, FileMode.Create, FileAccess.Write))
+            {
+                fileStream.SetLength(size);
+            }
+        }
+
+        [TestMethod]
+        [TestCategory("S3")]
+        public async Task Validation_HappyPath()
+        {
+            var initiateMultipartUploadResponse = new InitiateMultipartUploadResponse
+            {
+                UploadId = "test"
+            };
+
+            var s3Client = new Mock<IAmazonS3>();
+            s3Client
+                .Setup(x => x.InitiateMultipartUploadAsync(
+                    It.IsAny<InitiateMultipartUploadRequest>(),
+                    It.IsAny<CancellationToken>()))
+                .ReturnsAsync(initiateMultipartUploadResponse);
+
+            s3Client
+                .Setup(x => x.UploadPartAsync(It.IsAny<UploadPartRequest>(), It.IsAny<CancellationToken>()))
+                .ReturnsAsync((UploadPartRequest request, CancellationToken cancellationToken) =>
+                {
+                    return new UploadPartResponse { PartNumber = request.PartNumber };
+                });
+
+            var uploadRequest = new TransferUtilityUploadRequest
+            {
+                FilePath = _tempFilePath,
+                BucketName = "test-bucket",
+                Key = "test"
+            };
+            var multipartUpload = new MultipartUploadCommand(s3Client.Object, new TransferUtilityConfig(), uploadRequest);
+            await multipartUpload.ExecuteAsync(new CancellationToken());
+        }
+
+        [TestMethod]
+        [TestCategory("S3")]
+        public void Validation_ConstructUploadPartRequest()
+        {
+            var initiateMultipartUploadResponse = new InitiateMultipartUploadResponse
+            {
+                UploadId = "test"
+            };
+
+            var s3Client = new Mock<IAmazonS3>();
+
+            s3Client
+                .Setup(x => x.InitiateMultipartUploadAsync(
+                    It.IsAny<InitiateMultipartUploadRequest>(),
+                    It.IsAny<CancellationToken>()))
+                .ReturnsAsync(initiateMultipartUploadResponse);
+
+            var uploadRequest = new TransferUtilityUploadRequest
+            {
+                FilePath = _tempFilePath,
+                BucketName = "test-bucket",
+                Key = "test"
+            };
+
+            var multipartUpload = new MultipartUploadCommand(s3Client.Object, new TransferUtilityConfig(), uploadRequest);
+
+            var partSize = Math.Max(S3Constants.DefaultPartSize, uploadRequest.ContentLength / S3Constants.MaxNumberOfParts);
+
+            long filePosition = 0;
+            for (int i = 1; filePosition < uploadRequest.ContentLength; i++)
+            {
+                var constructUploadPartRequest = multipartUpload.ConstructUploadPartRequest(i, filePosition, initiateMultipartUploadResponse);
+
+                var expectedFileOffset = (i - 1) * partSize;
+                var remainingBytes = uploadRequest.ContentLength - expectedFileOffset;
+                var isLastPart = false;
+                if (remainingBytes <= partSize)
+                    isLastPart = true;
+
+                Assert.AreEqual(i, constructUploadPartRequest.PartNumber);
+                Assert.AreEqual(isLastPart, constructUploadPartRequest.IsLastPart);
+                Assert.AreEqual(
+                    isLastPart ? remainingBytes : partSize,
+                    constructUploadPartRequest.PartSize);
+                Assert.AreEqual(expectedFileOffset, constructUploadPartRequest.FilePosition);
+
+                filePosition += partSize;
+            }
+        }
+    }
+}

From 1f87b13bf0fb8d87f99cbb1763462d32bea7989d Mon Sep 17 00:00:00 2001
From: Garrett Beatty
Date: Mon, 3 Nov 2025 09:27:48 -0500
Subject: [PATCH 13/56] Update Response mapping logic for PutObjectResponse and
 CompleteMultipartResponse (#4086)

---
 .../7f23582e-3225-487b-83e7-167cf17cb238.json |  11 ++
 .../Transfer/Internal/ResponseMapper.cs       |   4 +
 .../Transfer/TransferUtilityUploadResponse.cs | 108 ++++++++++++++++++
 .../Custom/EmbeddedResource/mapping.json      |  34 +++++-
 .../EmbeddedResource/property-aliases.json    |   2 +
 .../UnitTests/Custom/ResponseMapperTests.cs   |  12 +-
 6 files changed, 166 insertions(+), 5 deletions(-)
 create mode 100644 generator/.DevConfigs/7f23582e-3225-487b-83e7-167cf17cb238.json

diff --git a/generator/.DevConfigs/7f23582e-3225-487b-83e7-167cf17cb238.json b/generator/.DevConfigs/7f23582e-3225-487b-83e7-167cf17cb238.json
new file mode 100644
index 000000000000..04d1122899c8
--- /dev/null
+++ b/generator/.DevConfigs/7f23582e-3225-487b-83e7-167cf17cb238.json
@@ -0,0 +1,11 @@
+{
+  "services": [
+    {
+      "serviceName": "S3",
+      "type": "patch",
+      "changeLogMessages": [
+        "Update Response mapping logic for PutObjectResponse and CompleteMultipartResponse"
+      ]
+    }
+  ]
+}
diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/ResponseMapper.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/ResponseMapper.cs
index 1e337635db81..302ffd5e2fce 100644
--- a/sdk/src/Services/S3/Custom/Transfer/Internal/ResponseMapper.cs
+++ b/sdk/src/Services/S3/Custom/Transfer/Internal/ResponseMapper.cs
@@ -61,6 +61,7 @@ internal static TransferUtilityUploadResponse MapPutObjectResponse(PutObjectResp
         response.ServerSideEncryptionKeyManagementServiceKeyId = source.ServerSideEncryptionKeyManagementServiceKeyId;
         response.ServerSideEncryptionMethod = source.ServerSideEncryptionMethod;
         response.VersionId = source.VersionId;
+        response.Size = source.Size;
 
         // Copy response metadata
         response.ResponseMetadata = source.ResponseMetadata;
@@ -97,6 +98,9 @@ internal static TransferUtilityUploadResponse MapCompleteMultipartUploadResponse
         response.ServerSideEncryptionMethod = source.ServerSideEncryptionMethod;
         response.ServerSideEncryptionKeyManagementServiceKeyId = source.ServerSideEncryptionKeyManagementServiceKeyId;
         response.VersionId = source.VersionId;
+        response.BucketName = source.BucketName;
+        response.Key = 
source.Key; + response.Location = source.Location; // Copy response metadata response.ResponseMetadata = source.ResponseMetadata; diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadResponse.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadResponse.cs index 3fcc20294a0a..fc3218f0411e 100644 --- a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadResponse.cs +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadResponse.cs @@ -35,6 +35,7 @@ namespace Amazon.S3.Transfer public class TransferUtilityUploadResponse : AmazonWebServiceResponse { private bool? _bucketKeyEnabled; + private string _bucketName; private string _checksumCRC32; private string _checksumCRC32C; private string _checksumCRC64NVME; @@ -43,8 +44,11 @@ public class TransferUtilityUploadResponse : AmazonWebServiceResponse private ChecksumType _checksumType; private string _etag; private Expiration _expiration; + private string _key; + private string _location; private RequestCharged _requestCharged; private ServerSideEncryptionCustomerMethod _serverSideEncryptionCustomerMethod; + private long? _size; private string _sseCustomerKeyMD5; private string _sseKmsEncryptionContext; private string _sseKmsKeyId; @@ -73,6 +77,110 @@ internal bool IsSetBucketKeyEnabled() return this._bucketKeyEnabled.HasValue; } + /// + /// Gets and sets the property BucketName. + /// + /// The name of the bucket that contains the newly created object. Does not return the + /// access point ARN or access point alias if used. + /// + /// + /// + /// When using this action with an access point, you must direct requests to the access + /// point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + /// When using this action with an access point through the Amazon Web Services SDKs, + /// you provide the access point ARN in place of the bucket name. For more information + /// about access point ARNs, see Using + /// access points in the Amazon S3 User Guide. + /// + /// + /// + /// When you use this action with Amazon S3 on Outposts, you must direct requests to the + /// S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + /// When you use this action with S3 on Outposts through the Amazon Web Services SDKs, + /// you provide the Outposts access point ARN in place of the bucket name. For more information + /// about S3 on Outposts ARNs, see What + /// is S3 on Outposts? in the Amazon S3 User Guide. + /// + /// + public string BucketName + { + get { return this._bucketName; } + set { this._bucketName = value; } + } + + /// + /// Checks if BucketName property is set. + /// + /// true if BucketName property is set. + internal bool IsSetBucketName() + { + return !string.IsNullOrEmpty(this._bucketName); + } + + /// + /// Gets and sets the property Key. + /// + /// The object key of the newly created object. + /// + /// + public string Key + { + get { return this._key; } + set { this._key = value; } + } + + /// + /// Checks if Key property is set. + /// + /// true if Key property is set. + internal bool IsSetKey() + { + return !string.IsNullOrEmpty(this._key); + } + + /// + /// Gets and sets the property Location. + /// + /// The URI that identifies the newly created object. + /// + /// + public string Location + { + get { return this._location; } + set { this._location = value; } + } + + /// + /// Checks if Location property is set. 
+ /// + /// true if Location property is set. + internal bool IsSetLocation() + { + return !string.IsNullOrEmpty(this._location); + } + + /// + /// Gets and sets the property Size. + /// + /// The size of the object in bytes. This will only be present if you append to an object. + /// + /// This functionality is only supported for objects in the S3 Express One Zone storage class in directory buckets. + /// + public long? Size + { + get { return this._size; } + set { this._size = value; } + } + + /// + /// Checks if Size property is set. + /// + /// true if Size property is set. + internal bool IsSetSize() + { + return this._size.HasValue; + } + /// /// Gets and sets the property ChecksumCRC32. /// diff --git a/sdk/test/Services/S3/UnitTests/Custom/EmbeddedResource/mapping.json b/sdk/test/Services/S3/UnitTests/Custom/EmbeddedResource/mapping.json index 224a0a35dfdb..6e34b8b1c2de 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/EmbeddedResource/mapping.json +++ b/sdk/test/Services/S3/UnitTests/Custom/EmbeddedResource/mapping.json @@ -43,6 +43,7 @@ }, "UploadResponse": { "PutObjectResponse": [ + "Bucket", "BucketKeyEnabled", "ChecksumCRC32", "ChecksumCRC32C", @@ -52,12 +53,15 @@ "ChecksumType", "ETag", "Expiration", + "Key", + "Location", "RequestCharged", "SSECustomerAlgorithm", "SSECustomerKeyMD5", "SSEKMSEncryptionContext", "SSEKMSKeyId", "ServerSideEncryption", + "Size", "VersionId" ] }, @@ -129,19 +133,43 @@ "Conversion": { "UploadRequest": { "PutObjectRequest": [ + "ACL", "Bucket", + "BucketKeyEnabled", + "CacheControl", "ChecksumAlgorithm", "ChecksumCRC32", "ChecksumCRC32C", "ChecksumCRC64NVME", "ChecksumSHA1", "ChecksumSHA256", + "ContentDisposition", + "ContentEncoding", + "ContentLanguage", + "ContentType", "ExpectedBucketOwner", + "Expires", + "GrantFullControl", + "GrantRead", + "GrantReadACP", + "GrantWriteACP", + "IfMatch", + "IfNoneMatch", "Key", + "Metadata", + "ObjectLockLegalHoldStatus", + "ObjectLockMode", + "ObjectLockRetainUntilDate", "RequestPayer", "SSECustomerAlgorithm", "SSECustomerKey", - "SSECustomerKeyMD5" + "SSECustomerKeyMD5", + "SSEKMSEncryptionContext", + "SSEKMSKeyId", + "ServerSideEncryption", + "StorageClass", + "Tagging", + "WebsiteRedirectLocation" ], "CreateMultipartRequest": [ "ACL", @@ -210,6 +238,7 @@ }, "CompleteMultipartResponse": { "UploadResponse": [ + "Bucket", "BucketKeyEnabled", "ChecksumCRC32", "ChecksumCRC32C", @@ -219,6 +248,8 @@ "ChecksumType", "ETag", "Expiration", + "Key", + "Location", "RequestCharged", "SSEKMSKeyId", "ServerSideEncryption", @@ -242,6 +273,7 @@ "SSEKMSEncryptionContext", "SSEKMSKeyId", "ServerSideEncryption", + "Size", "VersionId" ] }, diff --git a/sdk/test/Services/S3/UnitTests/Custom/EmbeddedResource/property-aliases.json b/sdk/test/Services/S3/UnitTests/Custom/EmbeddedResource/property-aliases.json index dd4e8c3ce2d7..63216442578e 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/EmbeddedResource/property-aliases.json +++ b/sdk/test/Services/S3/UnitTests/Custom/EmbeddedResource/property-aliases.json @@ -8,6 +8,7 @@ "SSEKMSKeyId": "ServerSideEncryptionKeyManagementServiceKeyId" }, "TransferUtilityUploadResponse": { + "Bucket" : "BucketName", "ServerSideEncryption": "ServerSideEncryptionMethod", "SSECustomerAlgorithm": "ServerSideEncryptionCustomerMethod", "SSECustomerKeyMD5": "ServerSideEncryptionCustomerProvidedKeyMD5", @@ -114,6 +115,7 @@ "Bucket": "BucketName" }, "CompleteMultipartUploadResponse": { + "Bucket" : "BucketName", "ServerSideEncryption": "ServerSideEncryptionMethod", "SSEKMSKeyId": 
"ServerSideEncryptionKeyManagementServiceKeyId" }, diff --git a/sdk/test/Services/S3/UnitTests/Custom/ResponseMapperTests.cs b/sdk/test/Services/S3/UnitTests/Custom/ResponseMapperTests.cs index ab14986d917f..5f332443d0a5 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/ResponseMapperTests.cs +++ b/sdk/test/Services/S3/UnitTests/Custom/ResponseMapperTests.cs @@ -165,7 +165,7 @@ public void MapUploadRequest_PutObjectRequest_AllMappedProperties_WorkCorrectly( var simpleUploadCommand = new SimpleUploadCommand(null, null, sourceRequest); return simpleUploadCommand.ConstructRequest(); }, - usesHeadersCollection: false, + usesHeadersCollection: true, (sourceRequest) => { sourceRequest.InputStream = new MemoryStream(1024); @@ -742,10 +742,14 @@ private static object GenerateTestValue(Type propertyType, string propertyName) }; } - // Integer types - if (propertyType == typeof(int) || propertyType == typeof(long)) + if (propertyType == typeof(int)) + { + return 1024; + } + + if (propertyType == typeof(long)) { - return 1024; + return 1024L; // Return long literal } if (propertyType == typeof(List)) From 88673e80e24a63f875dc9a2b60d0e164b320133f Mon Sep 17 00:00:00 2001 From: Garrett Beatty Date: Tue, 4 Nov 2025 12:24:48 -0500 Subject: [PATCH 14/56] create tuabortmultipartuploadrequest (#4093) --- .../9d07dc1e-d82d-4f94-8700-c7b57f872041.json | 11 ++ .../Internal/AbortMultipartUploadsCommand.cs | 23 +++- .../AbortMultipartUploadsCommand.async.cs | 18 +-- ...nsferUtilityAbortMultipartUploadRequest.cs | 126 ++++++++++++++++++ .../Transfer/_async/TransferUtility.async.cs | 27 +++- .../_bcl+netstandard/TransferUtility.sync.cs | 18 +++ .../UnitTests/Custom/ResponseMapperTests.cs | 120 +++++++++++++++++ 7 files changed, 324 insertions(+), 19 deletions(-) create mode 100644 generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872041.json create mode 100644 sdk/src/Services/S3/Custom/Transfer/TransferUtilityAbortMultipartUploadRequest.cs diff --git a/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872041.json b/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872041.json new file mode 100644 index 000000000000..cec5b3eb153d --- /dev/null +++ b/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872041.json @@ -0,0 +1,11 @@ +{ + "services": [ + { + "serviceName": "S3", + "type": "patch", + "changeLogMessages": [ + "Create AbortMultipartUploads api that takes in TransferUtilityAbortMultipartUploadRequest." 
+ ] + } + ] +} \ No newline at end of file diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/AbortMultipartUploadsCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/AbortMultipartUploadsCommand.cs index 3e3ca44376df..a0313c75b6c0 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/AbortMultipartUploadsCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/AbortMultipartUploadsCommand.cs @@ -31,24 +31,30 @@ namespace Amazon.S3.Transfer.Internal internal partial class AbortMultipartUploadsCommand : BaseCommand { IAmazonS3 _s3Client; - string _bucketName; - DateTime _initiatedDate; + TransferUtilityAbortMultipartUploadRequest _request; + TransferUtilityConfig _config; - internal AbortMultipartUploadsCommand(IAmazonS3 s3Client, string bucketName, DateTime initiateDate) + internal AbortMultipartUploadsCommand(IAmazonS3 s3Client, TransferUtilityAbortMultipartUploadRequest request, TransferUtilityConfig config) { this._s3Client = s3Client; - this._bucketName = bucketName; - this._initiatedDate = initiateDate; + this._request = request; + this._config = config; } internal ListMultipartUploadsRequest ConstructListMultipartUploadsRequest(ListMultipartUploadsResponse listResponse) { ListMultipartUploadsRequest listRequest = new ListMultipartUploadsRequest() { - BucketName = this._bucketName, + BucketName = this._request.BucketName, KeyMarker = listResponse.KeyMarker, UploadIdMarker = listResponse.NextUploadIdMarker, + ExpectedBucketOwner = this._request.ExpectedBucketOwner, + RequestPayer = this._request.RequestPayer }; + + + + ((Amazon.Runtime.Internal.IAmazonWebServiceRequest)listRequest).AddBeforeRequestHandler(this.RequestEventHandler); return listRequest; } @@ -57,10 +63,13 @@ internal AbortMultipartUploadRequest ConstructAbortMultipartUploadRequest(Multip { var abortRequest = new AbortMultipartUploadRequest() { - BucketName = this._bucketName, + BucketName = this._request.BucketName, Key = upload.Key, UploadId = upload.UploadId, + ExpectedBucketOwner = this._request.ExpectedBucketOwner, + RequestPayer = this._request.RequestPayer }; + ((Amazon.Runtime.Internal.IAmazonWebServiceRequest)abortRequest).AddBeforeRequestHandler(this.RequestEventHandler); return abortRequest; } diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/AbortMultipartUploadsCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/AbortMultipartUploadsCommand.async.cs index ed3dd81903ea..4e55afcd34e8 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/AbortMultipartUploadsCommand.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/AbortMultipartUploadsCommand.async.cs @@ -26,23 +26,19 @@ namespace Amazon.S3.Transfer.Internal { internal partial class AbortMultipartUploadsCommand : BaseCommand { - TransferUtilityConfig _config; - - internal AbortMultipartUploadsCommand(IAmazonS3 s3Client, string bucketName, DateTime initiateDate, TransferUtilityConfig config) - { - this._s3Client = s3Client; - this._bucketName = bucketName; - this._initiatedDate = initiateDate; - this._config = config; - } public override async Task ExecuteAsync(CancellationToken cancellationToken) { - if (string.IsNullOrEmpty(this._bucketName)) + if (string.IsNullOrEmpty(this._request.BucketName)) { throw new InvalidOperationException("The bucketName specified is null or empty!"); } + if (!this._request.IsSetInitiatedDate()) + { + throw new InvalidOperationException("InitiatedDate must be specified!"); + } + SemaphoreSlim asyncThrottler = null; CancellationTokenSource internalCts 
= null; try @@ -72,7 +68,7 @@ public override async Task ExecuteAsync(CancellationToken cancellationToken) // responses and throw the original exception. break; } - if (upload.Initiated < this._initiatedDate) + if (upload.Initiated < this._request.InitiatedDate.Value) { await asyncThrottler.WaitAsync(cancellationToken) .ConfigureAwait(continueOnCapturedContext: false); diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityAbortMultipartUploadRequest.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityAbortMultipartUploadRequest.cs new file mode 100644 index 000000000000..d048d128f15f --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityAbortMultipartUploadRequest.cs @@ -0,0 +1,126 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ +using System; +using Amazon.S3.Model; + +namespace Amazon.S3.Transfer +{ + /// + /// Contains all the parameters that can be set when making a request to abort multipart uploads + /// with the TransferUtility method. + /// + public class TransferUtilityAbortMultipartUploadRequest + { + private string _bucketName; + private DateTime? _initiatedDate; + private string _expectedBucketOwner; + private RequestPayer _requestPayer; + + /// + /// Gets or sets the name of the bucket containing multipart uploads. + /// + /// + /// The name of the bucket containing multipart uploads. + /// + public string BucketName + { + get { return this._bucketName; } + set { this._bucketName = value; } + } + + /// + /// Checks if BucketName property is set. + /// + /// true if BucketName property is set. + internal bool IsSetBucketName() + { + return !string.IsNullOrEmpty(this._bucketName); + } + + /// + /// Gets or sets the date before which the multipart uploads were initiated. + /// + /// + /// The date before which the multipart uploads were initiated. + /// + public DateTime? InitiatedDate + { + get { return this._initiatedDate; } + set { this._initiatedDate = value; } + } + + /// + /// Checks if InitiatedDate property is set. + /// + /// true if InitiatedDate property is set. + internal bool IsSetInitiatedDate() + { + return this._initiatedDate.HasValue; + } + + /// + /// Gets or sets the account ID of the expected bucket owner. + /// If the account ID that you provide does not match the actual owner of the bucket, + /// the request fails with the HTTP status code 403 Forbidden (access denied). + /// + /// + /// The account ID of the expected bucket owner. + /// + public string ExpectedBucketOwner + { + get { return this._expectedBucketOwner; } + set { this._expectedBucketOwner = value; } + } + + /// + /// Checks if ExpectedBucketOwner property is set. + /// + /// true if ExpectedBucketOwner property is set. 
+ internal bool IsSetExpectedBucketOwner() + { + return !string.IsNullOrEmpty(this._expectedBucketOwner); + } + + /// + /// Gets or sets the request payer setting for the abort multipart upload operations. + /// Confirms that the requester knows that they will be charged for the request. + /// Bucket owners need not specify this parameter in their requests. + /// + /// + /// The request payer setting for the abort multipart upload operations. + /// + public RequestPayer RequestPayer + { + get { return this._requestPayer; } + set { this._requestPayer = value; } + } + + /// + /// Checks if RequestPayer property is set. + /// + /// true if RequestPayer property is set. + internal bool IsSetRequestPayer() + { + return this._requestPayer != null; + } + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/_async/TransferUtility.async.cs b/sdk/src/Services/S3/Custom/Transfer/_async/TransferUtility.async.cs index 6622b2622e38..35205ad93f3a 100644 --- a/sdk/src/Services/S3/Custom/Transfer/_async/TransferUtility.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/_async/TransferUtility.async.cs @@ -238,7 +238,32 @@ public partial class TransferUtility : ITransferUtility using(CreateSpan(nameof(AbortMultipartUploadsAsync), null, Amazon.Runtime.Telemetry.Tracing.SpanKind.CLIENT)) { CheckForBlockedArn(bucketName, "AbortMultipartUploads"); - var command = new AbortMultipartUploadsCommand(this._s3Client, bucketName, initiatedDate, this._config); + var request = new TransferUtilityAbortMultipartUploadRequest + { + BucketName = bucketName, + InitiatedDate = initiatedDate + }; + var command = new AbortMultipartUploadsCommand(this._s3Client, request, this._config); + await command.ExecuteAsync(cancellationToken).ConfigureAwait(false); + } + } + + /// + /// Aborts the multipart uploads based on the specified request parameters. + /// + /// + /// Contains all the parameters required to abort multipart uploads. + /// + /// + /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. + /// + /// The task object representing the asynchronous operation. + public async Task AbortMultipartUploadsAsync(TransferUtilityAbortMultipartUploadRequest request, CancellationToken cancellationToken = default(CancellationToken)) + { + using(CreateSpan(nameof(AbortMultipartUploadsAsync), null, Amazon.Runtime.Telemetry.Tracing.SpanKind.CLIENT)) + { + CheckForBlockedArn(request.BucketName, "AbortMultipartUploads"); + var command = new AbortMultipartUploadsCommand(this._s3Client, request, this._config); await command.ExecuteAsync(cancellationToken).ConfigureAwait(false); } } diff --git a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.sync.cs b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.sync.cs index 659ddc9c7cae..f1ff62ce820d 100644 --- a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.sync.cs +++ b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.sync.cs @@ -459,6 +459,24 @@ public void AbortMultipartUploads(string bucketName, DateTime initiatedDate) } } + /// + /// Aborts the multipart uploads based on the specified request parameters. + /// + /// + /// Contains all the parameters required to abort multipart uploads. 
+        /// 
+        public void AbortMultipartUploads(TransferUtilityAbortMultipartUploadRequest request)
+        {
+            try
+            {
+                AbortMultipartUploadsAsync(request).Wait();
+            }
+            catch (AggregateException e)
+            {
+                ExceptionDispatchInfo.Capture(e.InnerException).Throw();
+            }
+        }
+
         #endregion
     }
 }
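// Illustrative usage sketch (not part of this patch): invoking the request-based overloads
// added above. The bucket, cutoff date, owner account, and client variable are hypothetical.
var transferUtility = new TransferUtility(s3Client);
await transferUtility.AbortMultipartUploadsAsync(new TransferUtilityAbortMultipartUploadRequest
{
    BucketName = "amzn-s3-demo-bucket",              // hypothetical bucket
    InitiatedDate = DateTime.UtcNow.AddDays(-7),     // abort uploads started more than a week ago
    ExpectedBucketOwner = "111122223333",            // optional ownership check
    RequestPayer = RequestPayer.Requester            // optional requester-pays confirmation
});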
set"); + Assert.AreEqual(RequestPayer.Requester, result.RequestPayer, "RequestPayer should be set"); + } + + [TestMethod] + [TestCategory("S3")] + public void MapAbortMultipartUploadsCommand_MinimalRequest_DoesNotSetOptionalFields() + { + // Create a minimal request with only required fields (BucketName and InitiatedDate) + var abortRequest = new TransferUtilityAbortMultipartUploadRequest + { + BucketName = "test-bucket", + InitiatedDate = DateTime.UtcNow.AddDays(-1) + // ExpectedBucketOwner and RequestPayer are not set (null) + }; + + // Create the command with the minimal request + var abortCommand = new AbortMultipartUploadsCommand(null, abortRequest, null); + + // Test ConstructAbortMultipartUploadRequest + var multipartUpload = new MultipartUpload + { + Key = "test-key", + UploadId = "test-upload-id" + }; + + var abortResult = abortCommand.ConstructAbortMultipartUploadRequest(multipartUpload); + + // Validate core fields are set but optional fields are not + Assert.IsNotNull(abortResult, "AbortMultipartUploadRequest should not be null"); + Assert.AreEqual("test-bucket", abortResult.BucketName, "BucketName should match"); + Assert.AreEqual("test-key", abortResult.Key, "Key should match"); + Assert.AreEqual("test-upload-id", abortResult.UploadId, "UploadId should match"); + Assert.IsNull(abortResult.ExpectedBucketOwner, "ExpectedBucketOwner should be null with minimal request"); + Assert.IsNull(abortResult.RequestPayer, "RequestPayer should be null with minimal request"); + + // Test ConstructListMultipartUploadsRequest + var listResponse = new ListMultipartUploadsResponse + { + KeyMarker = "test-key-marker", + NextUploadIdMarker = "test-upload-id-marker" + }; + + var listResult = abortCommand.ConstructListMultipartUploadsRequest(listResponse); + + // Validate core fields are set but optional fields are not + Assert.IsNotNull(listResult, "ListMultipartUploadsRequest should not be null"); + Assert.AreEqual("test-bucket", listResult.BucketName, "BucketName should match"); + Assert.AreEqual("test-key-marker", listResult.KeyMarker, "KeyMarker should match"); + Assert.AreEqual("test-upload-id-marker", listResult.UploadIdMarker, "UploadIdMarker should match"); + Assert.IsNull(listResult.ExpectedBucketOwner, "ExpectedBucketOwner should be null with minimal request"); + Assert.IsNull(listResult.RequestPayer, "RequestPayer should be null with minimal request"); + } + [TestMethod] [TestCategory("S3")] public void MapPutObjectResponse_NullValues_HandledCorrectly() From 0f9f10e82404bb5e936403d154b0cf3dfb624781 Mon Sep 17 00:00:00 2001 From: Garrett Beatty Date: Thu, 6 Nov 2025 10:31:37 -0500 Subject: [PATCH 15/56] Remove AmazonWebServiceResponse as base class for transfer utility repsonse objects. 
 (#4087)

stack-info: PR: https://github.com/aws/aws-sdk-net/pull/4087, branch: GarrettBeatty/stacked/12
---
 .../c49077d9-90b3-437f-b316-6d8d8833ae75.json   | 11 +++++++++++
 .../Custom/Transfer/Internal/ResponseMapper.cs  | 16 ----------------
 .../Transfer/TransferUtilityDownloadResponse.cs |  2 +-
 .../Transfer/TransferUtilityUploadResponse.cs   |  2 +-
 .../S3/UnitTests/Custom/ResponseMapperTests.cs  | 13 +++++--------
 5 files changed, 18 insertions(+), 26 deletions(-)
 create mode 100644 generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae75.json

diff --git a/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae75.json b/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae75.json
new file mode 100644
index 000000000000..999c11e35b3e
--- /dev/null
+++ b/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae75.json
@@ -0,0 +1,11 @@
+{
+  "services": [
+    {
+      "serviceName": "S3",
+      "type": "patch",
+      "changeLogMessages": [
+        "Remove AmazonWebServiceResponse as base class for transfer utility response objects."
+      ]
+    }
+  ]
+}
diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/ResponseMapper.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/ResponseMapper.cs
index 302ffd5e2fce..0eb055d6db1b 100644
--- a/sdk/src/Services/S3/Custom/Transfer/Internal/ResponseMapper.cs
+++ b/sdk/src/Services/S3/Custom/Transfer/Internal/ResponseMapper.cs
@@ -63,11 +63,6 @@ internal static TransferUtilityUploadResponse MapPutObjectResponse(PutObjectResp
         response.VersionId = source.VersionId;
         response.Size = source.Size;
 
-        // Copy response metadata
-        response.ResponseMetadata = source.ResponseMetadata;
-        response.ContentLength = source.ContentLength;
-        response.HttpStatusCode = source.HttpStatusCode;
-
         return response;
     }
 
@@ -102,11 +97,6 @@ internal static TransferUtilityUploadResponse MapCompleteMultipartUploadResponse
         response.Key = source.Key;
         response.Location = source.Location;
 
-        // Copy response metadata
-        response.ResponseMetadata = source.ResponseMetadata;
-        response.ContentLength = source.ContentLength;
-        response.HttpStatusCode = source.HttpStatusCode;
-
         return response;
     }
 
@@ -157,12 +147,6 @@ internal static TransferUtilityDownloadResponse MapGetObjectResponse(GetObjectRe
         response.TagCount = source.TagCount;
         response.VersionId = source.VersionId;
         response.WebsiteRedirectLocation = source.WebsiteRedirectLocation;
-
-        // Copy response metadata
-        response.ResponseMetadata = source.ResponseMetadata;
-        response.ContentLength = source.ContentLength;
-        response.HttpStatusCode = source.HttpStatusCode;
-
         return response;
     }
 
diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadResponse.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadResponse.cs
index d10c72f47c0f..761bb1454146 100644
--- a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadResponse.cs
+++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadResponse.cs
@@ -31,7 +31,7 @@ namespace Amazon.S3.Transfer
     /// Response object for Transfer Utility download operations.
     /// Contains response metadata from download operations.
     /// 
-    public class TransferUtilityDownloadResponse : AmazonWebServiceResponse
+    public class TransferUtilityDownloadResponse
     {
         /// 
         /// Gets and sets the AcceptRanges property.
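// Illustrative note (not part of this patch): with AmazonWebServiceResponse removed as the
// base class, HttpStatusCode, ContentLength, and ResponseMetadata are no longer inherited, so
// callers read only the transfer-specific surface. A hypothetical consumer, assuming the
// upload path returns the mapped TransferUtilityUploadResponse:
TransferUtilityUploadResponse uploadResult = await transferUtility.UploadAsync(uploadRequest);
Console.WriteLine($"ETag={uploadResult.ETag}, VersionId={uploadResult.VersionId}");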
diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadResponse.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadResponse.cs index fc3218f0411e..e7361bfd629f 100644 --- a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadResponse.cs +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadResponse.cs @@ -32,7 +32,7 @@ namespace Amazon.S3.Transfer /// Contains unified response fields from both simple uploads (PutObjectResponse) /// and multipart uploads (CompleteMultipartUploadResponse). /// - public class TransferUtilityUploadResponse : AmazonWebServiceResponse + public class TransferUtilityUploadResponse { private bool? _bucketKeyEnabled; private string _bucketName; diff --git a/sdk/test/Services/S3/UnitTests/Custom/ResponseMapperTests.cs b/sdk/test/Services/S3/UnitTests/Custom/ResponseMapperTests.cs index 5b19acae5b76..32310545fb27 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/ResponseMapperTests.cs +++ b/sdk/test/Services/S3/UnitTests/Custom/ResponseMapperTests.cs @@ -149,8 +149,7 @@ public void MapPutObjectResponse_AllMappedProperties_WorkCorrectly() }, (sourceResponse, targetResponse) => { - Assert.AreEqual(sourceResponse.HttpStatusCode, targetResponse.HttpStatusCode, "HttpStatusCode should match"); - Assert.AreEqual(sourceResponse.ContentLength, targetResponse.ContentLength, "ContentLength should match"); + }); } @@ -644,12 +643,11 @@ public void MapCompleteMultipartUploadResponse_AllMappedProperties_WorkCorrectly (sourceResponse) => { sourceResponse.HttpStatusCode = HttpStatusCode.OK; - sourceResponse.ContentLength = 2048; + sourceResponse.ContentLength = 1024; }, (sourceResponse, targetResponse) => { - Assert.AreEqual(sourceResponse.HttpStatusCode, targetResponse.HttpStatusCode, "HttpStatusCode should match"); - Assert.AreEqual(sourceResponse.ContentLength, targetResponse.ContentLength, "ContentLength should match"); + }); } @@ -720,12 +718,11 @@ public void MapGetObjectResponse_AllMappedProperties_WorkCorrectly() (sourceResponse) => { sourceResponse.HttpStatusCode = HttpStatusCode.OK; - sourceResponse.ContentLength = 2048; + sourceResponse.ContentLength = 1024; }, (sourceResponse, targetResponse) => { - Assert.AreEqual(sourceResponse.HttpStatusCode, targetResponse.HttpStatusCode, "HttpStatusCode should match"); - Assert.AreEqual(sourceResponse.ContentLength, targetResponse.ContentLength, "ContentLength should match"); + }); } From e2c4fce7abd4a9429c559e755aeb851b1144bb6a Mon Sep 17 00:00:00 2001 From: Garrett Beatty Date: Thu, 6 Nov 2025 10:33:36 -0500 Subject: [PATCH 16/56] Add GetObjectResponse to TransferUtilityOpenStreamResponse mapping. 
(#4076) stack-info: PR: https://github.com/aws/aws-sdk-net/pull/4076, branch: GarrettBeatty/stacked/9 --- .../252dad9f-d2a9-4d49-bff8-000924f0add4.json | 11 + .../Transfer/Internal/ResponseMapper.cs | 116 ++++--- .../TransferUtilityDownloadResponse.cs | 265 +--------------- .../TransferUtilityGetObjectResponseBase.cs | 293 ++++++++++++++++++ .../TransferUtilityOpenStreamResponse.cs | 97 ++++++ .../EmbeddedResource/property-aliases.json | 7 + .../UnitTests/Custom/ResponseMapperTests.cs | 198 ++++++++++++ 7 files changed, 685 insertions(+), 302 deletions(-) create mode 100644 generator/.DevConfigs/252dad9f-d2a9-4d49-bff8-000924f0add4.json create mode 100644 sdk/src/Services/S3/Custom/Transfer/TransferUtilityGetObjectResponseBase.cs create mode 100644 sdk/src/Services/S3/Custom/Transfer/TransferUtilityOpenStreamResponse.cs diff --git a/generator/.DevConfigs/252dad9f-d2a9-4d49-bff8-000924f0add4.json b/generator/.DevConfigs/252dad9f-d2a9-4d49-bff8-000924f0add4.json new file mode 100644 index 000000000000..3c1fff65ffab --- /dev/null +++ b/generator/.DevConfigs/252dad9f-d2a9-4d49-bff8-000924f0add4.json @@ -0,0 +1,11 @@ +{ + "services": [ + { + "serviceName": "S3", + "type": "minor", + "changeLogMessages": [ + "Add GetObjectResponse to TransferUtilityDownloadResponse mapping." + ] + } + ] +} \ No newline at end of file diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/ResponseMapper.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/ResponseMapper.cs index 0eb055d6db1b..2ba493556c35 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/ResponseMapper.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/ResponseMapper.cs @@ -20,6 +20,7 @@ * */ +using System; using System.Collections.Generic; using Amazon.S3.Model; @@ -37,10 +38,11 @@ internal static class ResponseMapper /// /// The PutObjectResponse to map from /// A new TransferUtilityUploadResponse with mapped fields + /// Thrown when source is null internal static TransferUtilityUploadResponse MapPutObjectResponse(PutObjectResponse source) { if (source == null) - return null; + throw new ArgumentNullException(nameof(source)); var response = new TransferUtilityUploadResponse(); @@ -72,10 +74,11 @@ internal static TransferUtilityUploadResponse MapPutObjectResponse(PutObjectResp ///
/// The CompleteMultipartUploadResponse to map from /// A new TransferUtilityUploadResponse with mapped fields + /// Thrown when source is null internal static TransferUtilityUploadResponse MapCompleteMultipartUploadResponse(CompleteMultipartUploadResponse source) { if (source == null) - return null; + throw new ArgumentNullException(nameof(source)); var response = new TransferUtilityUploadResponse(); @@ -100,55 +103,92 @@ internal static TransferUtilityUploadResponse MapCompleteMultipartUploadResponse return response; } + /// + /// Private helper method to populate the common properties from GetObjectResponse to the base response class. + /// Contains all the shared mapping logic for GetObjectResponse fields. + /// + /// The GetObjectResponse to map from + /// The TransferUtilityGetObjectResponseBase to populate + /// Thrown when source or target is null + private static void PopulateGetObjectResponseBase(GetObjectResponse source, TransferUtilityGetObjectResponseBase target) + { + if (source == null) + throw new ArgumentNullException(nameof(source)); + if (target == null) + throw new ArgumentNullException(nameof(target)); + + // Map all fields as defined in mapping.json "Conversion" -> "GetObjectResponse" -> "DownloadResponse" + target.AcceptRanges = source.AcceptRanges; + target.BucketKeyEnabled = source.BucketKeyEnabled.GetValueOrDefault(); + target.ChecksumCRC32 = source.ChecksumCRC32; + target.ChecksumCRC32C = source.ChecksumCRC32C; + target.ChecksumCRC64NVME = source.ChecksumCRC64NVME; + target.ChecksumSHA1 = source.ChecksumSHA1; + target.ChecksumSHA256 = source.ChecksumSHA256; + target.ChecksumType = source.ChecksumType; + target.ContentRange = source.ContentRange; + target.Headers = source.Headers; + target.DeleteMarker = source.DeleteMarker; + target.ETag = source.ETag; + target.Expiration = source.Expiration; + target.ExpiresString = source.ExpiresString; + target.LastModified = source.LastModified; + target.Metadata = source.Metadata; + target.MissingMeta = source.MissingMeta; + target.ObjectLockLegalHoldStatus = source.ObjectLockLegalHoldStatus; + target.ObjectLockMode = source.ObjectLockMode; + target.ObjectLockRetainUntilDate = source.ObjectLockRetainUntilDate; + target.PartsCount = source.PartsCount; + target.ReplicationStatus = source.ReplicationStatus; + target.RequestCharged = source.RequestCharged; + target.RestoreExpiration = source.RestoreExpiration; + target.RestoreInProgress = source.RestoreInProgress; + target.ServerSideEncryptionCustomerMethod = source.ServerSideEncryptionCustomerMethod; + target.ServerSideEncryptionCustomerProvidedKeyMD5 = source.ServerSideEncryptionCustomerProvidedKeyMD5; + target.ServerSideEncryptionKeyManagementServiceKeyId = source.ServerSideEncryptionKeyManagementServiceKeyId; + target.ServerSideEncryptionMethod = source.ServerSideEncryptionMethod; + target.StorageClass = source.StorageClass; + target.TagCount = source.TagCount; + target.VersionId = source.VersionId; + target.WebsiteRedirectLocation = source.WebsiteRedirectLocation; + } + /// /// Maps a GetObjectResponse to TransferUtilityDownloadResponse. /// Uses the field mappings defined in mapping.json "Conversion" -> "GetObjectResponse" -> "DownloadResponse". 
/// /// The GetObjectResponse to map from /// A new TransferUtilityDownloadResponse with mapped fields + /// Thrown when source is null internal static TransferUtilityDownloadResponse MapGetObjectResponse(GetObjectResponse source) { if (source == null) - return null; + throw new ArgumentNullException(nameof(source)); var response = new TransferUtilityDownloadResponse(); - - // Map all fields as defined in mapping.json "Conversion" -> "GetObjectResponse" -> "DownloadResponse" - response.AcceptRanges = source.AcceptRanges; - response.BucketKeyEnabled = source.BucketKeyEnabled.GetValueOrDefault(); - response.ChecksumCRC32 = source.ChecksumCRC32; - response.ChecksumCRC32C = source.ChecksumCRC32C; - response.ChecksumCRC64NVME = source.ChecksumCRC64NVME; - response.ChecksumSHA1 = source.ChecksumSHA1; - response.ChecksumSHA256 = source.ChecksumSHA256; - response.ChecksumType = source.ChecksumType; - response.ContentRange = source.ContentRange; - response.Headers = source.Headers; - response.DeleteMarker = source.DeleteMarker; - response.ETag = source.ETag; - response.Expiration = source.Expiration; - response.ExpiresString = source.ExpiresString; - response.LastModified = source.LastModified; - response.Metadata = source.Metadata; - response.MissingMeta = source.MissingMeta; - response.ObjectLockLegalHoldStatus = source.ObjectLockLegalHoldStatus; - response.ObjectLockMode = source.ObjectLockMode; - response.ObjectLockRetainUntilDate = source.ObjectLockRetainUntilDate; - response.PartsCount = source.PartsCount; - response.ReplicationStatus = source.ReplicationStatus; - response.RequestCharged = source.RequestCharged; - response.RestoreExpiration = source.RestoreExpiration; - response.RestoreInProgress = source.RestoreInProgress; - response.ServerSideEncryptionCustomerMethod = source.ServerSideEncryptionCustomerMethod; - response.ServerSideEncryptionCustomerProvidedKeyMD5 = source.ServerSideEncryptionCustomerProvidedKeyMD5; - response.ServerSideEncryptionKeyManagementServiceKeyId = source.ServerSideEncryptionKeyManagementServiceKeyId; - response.ServerSideEncryptionMethod = source.ServerSideEncryptionMethod; - response.StorageClass = source.StorageClass; - response.TagCount = source.TagCount; - response.VersionId = source.VersionId; - response.WebsiteRedirectLocation = source.WebsiteRedirectLocation; + PopulateGetObjectResponseBase(source, response); return response; } + + /// + /// Maps a GetObjectResponse to TransferUtilityOpenStreamResponse. + /// Uses the same field mappings as DownloadResponse plus the ResponseStream property. 
+ /// + /// The GetObjectResponse to map from + /// A new TransferUtilityOpenStreamResponse with mapped fields + /// Thrown when source is null + internal static TransferUtilityOpenStreamResponse MapGetObjectResponseToOpenStream(GetObjectResponse source) + { + if (source == null) + throw new ArgumentNullException(nameof(source)); + + var response = new TransferUtilityOpenStreamResponse(); + PopulateGetObjectResponseBase(source, response); + response.ResponseStream = source.ResponseStream; + + return response; + } + } } diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadResponse.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadResponse.cs index 761bb1454146..36474a64c0aa 100644 --- a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadResponse.cs +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadResponse.cs @@ -31,270 +31,7 @@ namespace Amazon.S3.Transfer /// Response object for Transfer Utility download operations. /// Contains response metadata from download operations. ///
- public class TransferUtilityDownloadResponse + public class TransferUtilityDownloadResponse : TransferUtilityGetObjectResponseBase { - /// - /// Gets and sets the AcceptRanges property. - /// - public string AcceptRanges { get; set; } - - /// - /// Gets and sets the property BucketKeyEnabled. - /// - /// Indicates whether the object uses an S3 Bucket Key for server-side encryption with - /// Amazon Web Services KMS (SSE-KMS). - /// - /// - public bool? BucketKeyEnabled { get; set; } - - /// - /// The collection of headers for the response. - /// - public HeadersCollection Headers { get; set; } - - /// - /// Gets and sets the property ChecksumCRC32. - /// - /// The Base64 encoded, 32-bit CRC-32 checksum of the object. - /// - /// - public string ChecksumCRC32 { get; set; } - - /// - /// Gets and sets the property ChecksumCRC32C. - /// - /// The Base64 encoded, 32-bit CRC-32C checksum of the object. - /// - /// - public string ChecksumCRC32C { get; set; } - - /// - /// Gets and sets the property ChecksumCRC64NVME. - /// - /// The Base64 encoded, 64-bit CRC-64NVME checksum of the object. - /// - /// - public string ChecksumCRC64NVME { get; set; } - - /// - /// Gets and sets the property ChecksumSHA1. - /// - /// The Base64 encoded, 160-bit SHA-1 digest of the object. - /// - /// - public string ChecksumSHA1 { get; set; } - - /// - /// Gets and sets the property ChecksumSHA256. - /// - /// The Base64 encoded, 256-bit SHA-256 checksum of the object. - /// - /// - public string ChecksumSHA256 { get; set; } - - /// - /// Gets and sets the property ChecksumType. - /// - /// The checksum type used to calculate the object-level checksum. - /// - /// - public ChecksumType ChecksumType { get; set; } - - /// - /// Gets and sets the ContentRange property. - /// - public string ContentRange { get; set; } - - /// - /// Gets and sets the DeleteMarker property. - /// - /// Specifies whether the object retrieved was (true) or was not (false) a Delete Marker. - /// - /// - public string DeleteMarker { get; set; } - - /// - /// Gets and sets the ETag property. - /// - /// An ETag is an opaque identifier assigned by a web server to a specific version of a resource found at a URL. - /// - /// - public string ETag { get; set; } - - /// - /// Gets and sets the property Expiration. - /// - /// If the object expiration is configured, this will contain the expiration date and rule ID. - /// - /// - public Expiration Expiration { get; set; } - - /// - /// Gets and sets the ExpiresString property. - /// - /// The date and time at which the object is no longer cacheable (string format). - /// - /// - public string ExpiresString { get; set; } - - /// - /// Gets and sets the property LastModified. - /// - /// Date and time when the object was last modified. - /// - /// - public DateTime? LastModified { get; set; } - - /// - /// Gets and sets the Metadata property. - /// - /// The collection of metadata for the object. - /// - /// - public MetadataCollection Metadata { get; set; } - - /// - /// Gets and sets the property MissingMeta. - /// - /// This is set to the number of metadata entries not returned in the headers that are - /// prefixed with x-amz-meta-. - /// - /// - public int? MissingMeta { get; set; } - - /// - /// Gets and sets the property ObjectLockLegalHoldStatus. - /// - /// Indicates whether this object has an active legal hold. - /// - /// - public ObjectLockLegalHoldStatus ObjectLockLegalHoldStatus { get; set; } - - /// - /// Gets and sets the property ObjectLockMode. 
- /// - /// The Object Lock mode that's currently in place for this object. - /// - /// - public ObjectLockMode ObjectLockMode { get; set; } - - /// - /// Gets and sets the property ObjectLockRetainUntilDate. - /// - /// The date and time when this object's Object Lock will expire. - /// - /// - public DateTime? ObjectLockRetainUntilDate { get; set; } - - /// - /// Gets and sets the PartsCount property. - /// - /// The number of parts this object has. - /// - /// - public int? PartsCount { get; set; } - - /// - /// Gets and sets the property ReplicationStatus. - /// - /// Amazon S3 can return this if your request involves a bucket that is either a source - /// or destination in a replication rule. - /// - /// - public ReplicationStatus ReplicationStatus { get; set; } - - /// - /// Gets and sets the RequestCharged property. - /// - /// If present, indicates that the requester was successfully charged for the request. - /// - /// - public RequestCharged RequestCharged { get; set; } - - /// - /// Gets and sets the RestoreExpiration property. - /// - /// RestoreExpiration will be set for objects that have been restored from Amazon Glacier. - /// It indicates for those objects how long the restored object will exist. - /// - /// - public DateTime? RestoreExpiration { get; set; } - - /// - /// Gets and sets the RestoreInProgress - /// - /// Will be true when the object is in the process of being restored from Amazon Glacier. - /// - /// - /// This functionality is not supported for directory buckets. - /// Only the S3 Express One Zone storage class is supported by directory buckets to store objects. - /// - /// - public bool? RestoreInProgress { get; set; } - - /// - /// Gets and sets the ServerSideEncryptionCustomerMethod property. - /// - /// The server-side encryption algorithm to be used with the customer provided key. - /// - /// - public ServerSideEncryptionCustomerMethod ServerSideEncryptionCustomerMethod { get; set; } - - /// - /// Gets and sets the ServerSideEncryptionCustomerProvidedKeyMD5 property. - /// - /// The MD5 server-side encryption of the customer-provided encryption key. - /// - /// - public string ServerSideEncryptionCustomerProvidedKeyMD5 { get; set; } - - /// - /// Gets and sets the ServerSideEncryptionKeyManagementServiceKeyId property. - /// - /// If present, indicates the ID of the KMS key that was used for object encryption. - /// - /// - public string ServerSideEncryptionKeyManagementServiceKeyId { get; set; } - - /// - /// Gets and sets the ServerSideEncryptionMethod property. - /// - /// The server-side encryption algorithm used when you store this object in Amazon S3. - /// - /// - public ServerSideEncryptionMethod ServerSideEncryptionMethod { get; set; } - - /// - /// Gets and sets the property StorageClass. - /// - /// Provides storage class information of the object. - /// - /// - public S3StorageClass StorageClass { get; set; } - - /// - /// Gets and sets the property TagCount. - /// - /// The number of tags, if any, on the object. - /// - /// - public int TagCount { get; set; } - - /// - /// Gets and sets the property VersionId. - /// - /// Version ID of the object. - /// - /// - public string VersionId { get; set; } - - /// - /// Gets and sets the property WebsiteRedirectLocation. - /// - /// If the bucket is configured as a website, redirects requests for this object to another - /// object in the same bucket or to an external URL. 
- /// - /// - public string WebsiteRedirectLocation { get; set; } } } diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityGetObjectResponseBase.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityGetObjectResponseBase.cs new file mode 100644 index 000000000000..431d498afe9e --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityGetObjectResponseBase.cs @@ -0,0 +1,293 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ + +using System; +using System.Collections.Generic; +using Amazon.Runtime; +using Amazon.S3.Model; + +namespace Amazon.S3.Transfer +{ + /// + /// Base response object for Transfer Utility operations that retrieve S3 object metadata. + /// Contains response metadata from S3 GetObject operations. + /// + public abstract class TransferUtilityGetObjectResponseBase + { + /// + /// Gets and sets the AcceptRanges property. + /// + public string AcceptRanges { get; set; } + + /// + /// Gets and sets the property BucketKeyEnabled. + /// + /// Indicates whether the object uses an S3 Bucket Key for server-side encryption with + /// Amazon Web Services KMS (SSE-KMS). + /// + /// + public bool? BucketKeyEnabled { get; set; } + + /// + /// The collection of headers for the response. + /// + public HeadersCollection Headers { get; set; } + + /// + /// Gets and sets the property ChecksumCRC32. + /// + /// The Base64 encoded, 32-bit CRC-32 checksum of the object. + /// + /// + public string ChecksumCRC32 { get; set; } + + /// + /// Gets and sets the property ChecksumCRC32C. + /// + /// The Base64 encoded, 32-bit CRC-32C checksum of the object. + /// + /// + public string ChecksumCRC32C { get; set; } + + /// + /// Gets and sets the property ChecksumCRC64NVME. + /// + /// The Base64 encoded, 64-bit CRC-64NVME checksum of the object. + /// + /// + public string ChecksumCRC64NVME { get; set; } + + /// + /// Gets and sets the property ChecksumSHA1. + /// + /// The Base64 encoded, 160-bit SHA-1 digest of the object. + /// + /// + public string ChecksumSHA1 { get; set; } + + /// + /// Gets and sets the property ChecksumSHA256. + /// + /// The Base64 encoded, 256-bit SHA-256 checksum of the object. + /// + /// + public string ChecksumSHA256 { get; set; } + + /// + /// Gets and sets the property ChecksumType. + /// + /// The checksum type used to calculate the object-level checksum. + /// + /// + public ChecksumType ChecksumType { get; set; } + + /// + /// Gets and sets the ContentRange property. + /// + public string ContentRange { get; set; } + + /// + /// Gets and sets the DeleteMarker property. + /// + /// Specifies whether the object retrieved was (true) or was not (false) a Delete Marker. 
+ /// + /// + public string DeleteMarker { get; set; } + + /// + /// Gets and sets the ETag property. + /// + /// An ETag is an opaque identifier assigned by a web server to a specific version of a resource found at a URL. + /// + /// + public string ETag { get; set; } + + /// + /// Gets and sets the property Expiration. + /// + /// If the object expiration is configured, this will contain the expiration date and rule ID. + /// + /// + public Expiration Expiration { get; set; } + + /// + /// Gets and sets the ExpiresString property. + /// + /// The date and time at which the object is no longer cacheable (string format). + /// + /// + public string ExpiresString { get; set; } + + /// + /// Gets and sets the property LastModified. + /// + /// Date and time when the object was last modified. + /// + /// + public DateTime? LastModified { get; set; } + + /// + /// Gets and sets the Metadata property. + /// + /// The collection of metadata for the object. + /// + /// + public MetadataCollection Metadata { get; set; } + + /// + /// Gets and sets the property MissingMeta. + /// + /// This is set to the number of metadata entries not returned in the headers that are + /// prefixed with x-amz-meta-. + /// + /// + public int? MissingMeta { get; set; } + + /// + /// Gets and sets the property ObjectLockLegalHoldStatus. + /// + /// Indicates whether this object has an active legal hold. + /// + /// + public ObjectLockLegalHoldStatus ObjectLockLegalHoldStatus { get; set; } + + /// + /// Gets and sets the property ObjectLockMode. + /// + /// The Object Lock mode that's currently in place for this object. + /// + /// + public ObjectLockMode ObjectLockMode { get; set; } + + /// + /// Gets and sets the property ObjectLockRetainUntilDate. + /// + /// The date and time when this object's Object Lock will expire. + /// + /// + public DateTime? ObjectLockRetainUntilDate { get; set; } + + /// + /// Gets and sets the PartsCount property. + /// + /// The number of parts this object has. + /// + /// + public int? PartsCount { get; set; } + + /// + /// Gets and sets the property ReplicationStatus. + /// + /// Amazon S3 can return this if your request involves a bucket that is either a source + /// or destination in a replication rule. + /// + /// + public ReplicationStatus ReplicationStatus { get; set; } + + /// + /// Gets and sets the RequestCharged property. + /// + /// If present, indicates that the requester was successfully charged for the request. + /// + /// + public RequestCharged RequestCharged { get; set; } + + /// + /// Gets and sets the RestoreExpiration property. + /// + /// RestoreExpiration will be set for objects that have been restored from Amazon Glacier. + /// It indicates for those objects how long the restored object will exist. + /// + /// + public DateTime? RestoreExpiration { get; set; } + + /// + /// + /// + public bool? RestoreInProgress { get; set; } + + /// + /// Gets and sets the ServerSideEncryptionCustomerMethod property. + /// + /// The server-side encryption algorithm to be used with the customer provided key. + /// + /// + public ServerSideEncryptionCustomerMethod ServerSideEncryptionCustomerMethod { get; set; } + + /// + /// Gets and sets the ServerSideEncryptionCustomerProvidedKeyMD5 property. + /// + /// The MD5 server-side encryption of the customer-provided encryption key. + /// + /// + public string ServerSideEncryptionCustomerProvidedKeyMD5 { get; set; } + + /// + /// Gets and sets the ServerSideEncryptionKeyManagementServiceKeyId property. 
+ /// + /// If present, indicates the ID of the KMS key that was used for object encryption. + /// + /// + public string ServerSideEncryptionKeyManagementServiceKeyId { get; set; } + + /// + /// Gets and sets the ServerSideEncryptionMethod property. + /// + /// The server-side encryption algorithm used when you store this object in Amazon S3. + /// + /// + public ServerSideEncryptionMethod ServerSideEncryptionMethod { get; set; } + + /// + /// Gets and sets the property StorageClass. + /// + /// Provides storage class information of the object. + /// + /// + public S3StorageClass StorageClass { get; set; } + + /// + /// Gets and sets the property TagCount. + /// + /// The number of tags, if any, on the object. + /// + /// + public int? TagCount { get; set; } + + /// + /// Gets and sets the property VersionId. + /// + /// Version ID of the object. + /// + /// + public string VersionId { get; set; } + + /// + /// Gets and sets the property WebsiteRedirectLocation. + /// + /// If the bucket is configured as a website, redirects requests for this object to another + /// object in the same bucket or to an external URL. + /// + /// + public string WebsiteRedirectLocation { get; set; } + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityOpenStreamResponse.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityOpenStreamResponse.cs new file mode 100644 index 000000000000..df2f57bce35f --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityOpenStreamResponse.cs @@ -0,0 +1,97 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ + +using System; +using System.IO; +using Amazon.Runtime; + +namespace Amazon.S3.Transfer +{ + /// + /// Response object for Transfer Utility open stream operations. + /// Contains the stream and response metadata from open stream operations. + /// + public class TransferUtilityOpenStreamResponse : TransferUtilityGetObjectResponseBase, IDisposable + { + private bool disposed; + private Stream responseStream; + + #region Dispose Pattern + + /// + /// Disposes of all managed and unmanaged resources. + /// + public void Dispose() + { + Dispose(true); + GC.SuppressFinalize(this); + } + + /// + /// Releases the unmanaged resources used by the TransferUtilityOpenStreamResponse and optionally disposes of the managed resources. + /// + /// true to release both managed and unmanaged resources; false to releases only unmanaged resources. + protected virtual void Dispose(bool disposing) + { + if (!this.disposed) + { + if (disposing) + { + // Remove Managed Resources + // I.O.W. remove resources that have to be explicitly + // "Dispose"d or Closed. For an S3 Response, these are: + // 1. The Response Stream for GET Object requests + // 2. 
The HttpResponse object for GET Object requests + if (responseStream != null) + { + responseStream.Dispose(); + } + } + + responseStream = null; + disposed = true; + } + } + + #endregion + + /// + /// Gets and sets the ResponseStream property. + /// + /// An open stream read from to get the data from S3. In order to + /// use this stream without leaking the underlying resource, please + /// wrap access to the stream within a using block. + /// + /// + public Stream ResponseStream + { + get { return this.responseStream; } + set { this.responseStream = value; } + } + + // Check to see if ResponseStream property is set + internal bool IsSetResponseStream() + { + return this.responseStream != null; + } + } +} diff --git a/sdk/test/Services/S3/UnitTests/Custom/EmbeddedResource/property-aliases.json b/sdk/test/Services/S3/UnitTests/Custom/EmbeddedResource/property-aliases.json index 63216442578e..6e08ac4a05d2 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/EmbeddedResource/property-aliases.json +++ b/sdk/test/Services/S3/UnitTests/Custom/EmbeddedResource/property-aliases.json @@ -132,6 +132,13 @@ "SSEKMSKeyId": "ServerSideEncryptionKeyManagementServiceKeyId", "ServerSideEncryption": "ServerSideEncryptionMethod", "Restore": "RestoreExpiration" + }, + "TransferUtilityOpenStreamResponse": { + "SSECustomerAlgorithm": "ServerSideEncryptionCustomerMethod", + "SSECustomerKeyMD5": "ServerSideEncryptionCustomerProvidedKeyMD5", + "SSEKMSKeyId": "ServerSideEncryptionKeyManagementServiceKeyId", + "ServerSideEncryption": "ServerSideEncryptionMethod", + "Restore": "RestoreExpiration" } } } \ No newline at end of file diff --git a/sdk/test/Services/S3/UnitTests/Custom/ResponseMapperTests.cs b/sdk/test/Services/S3/UnitTests/Custom/ResponseMapperTests.cs index 32310545fb27..046b1f9418bc 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/ResponseMapperTests.cs +++ b/sdk/test/Services/S3/UnitTests/Custom/ResponseMapperTests.cs @@ -804,6 +804,204 @@ public void ValidateTransferUtilityUploadRequestDefinitionCompleteness() }); } + [TestMethod] + [TestCategory("S3")] + public void MapGetObjectResponseToOpenStream_AllMappedProperties_WorkCorrectly() + { + ValidateMappingTransferUtilityAndSdkRequests( + new[] { "Conversion", "GetObjectResponse", "DownloadResponse" }, + (sourceResponse) => + { + return ResponseMapper.MapGetObjectResponseToOpenStream(sourceResponse); + }, + usesHeadersCollection: true, + (sourceResponse) => + { + sourceResponse.HttpStatusCode = HttpStatusCode.OK; + sourceResponse.ContentLength = 1024; + sourceResponse.ResponseStream = new MemoryStream(new byte[1024]); + }, + (sourceResponse, targetResponse) => + { + Assert.AreSame(sourceResponse.ResponseStream, targetResponse.ResponseStream, "ResponseStream should be the same instance"); + }); + } + + [TestMethod] + [TestCategory("S3")] + public void MapGetObjectResponseToOpenStream_NullValues_HandledCorrectly() + { + // Test null handling scenarios + var testCases = new[] + { + // Test null Expiration + new GetObjectResponse { Expiration = null }, + + // Test null enum conversions + new GetObjectResponse { ChecksumType = null, RequestCharged = null, ServerSideEncryptionMethod = null }, + + // Test null ResponseStream + new GetObjectResponse { ResponseStream = null } + }; + + foreach (var testCase in testCases) + { + var mapped = ResponseMapper.MapGetObjectResponseToOpenStream(testCase); + Assert.IsNotNull(mapped, "Response should always be mappable"); + + // Test null handling + if (testCase.Expiration == null) + { + Assert.IsNull(mapped.Expiration, 
"Null Expiration should map to null"); + } + + if (testCase.ResponseStream == null) + { + Assert.IsNull(mapped.ResponseStream, "Null ResponseStream should map to null"); + } + } + } + + [TestMethod] + [TestCategory("S3")] + public void MapGetObjectResponseToOpenStream_ResponseStream_HandledCorrectly() + { + // Test with actual stream + var testStream = new MemoryStream(new byte[] { 1, 2, 3, 4, 5 }); + var sourceResponse = new GetObjectResponse + { + ResponseStream = testStream, + ETag = "test-etag", + Headers = { ContentLength = 5 } + }; + + var mappedResponse = ResponseMapper.MapGetObjectResponseToOpenStream(sourceResponse); + + Assert.IsNotNull(mappedResponse, "Mapped response should not be null"); + Assert.AreSame(testStream, mappedResponse.ResponseStream, "ResponseStream should be the same instance"); + Assert.AreEqual("test-etag", mappedResponse.ETag, "Other properties should also be mapped"); + Assert.AreEqual(5, mappedResponse.Headers.ContentLength, "ContentLength should be mapped"); + + // Test with null stream + var sourceWithNullStream = new GetObjectResponse + { + ResponseStream = null, + ETag = "test-etag-2" + }; + + var mappedWithNullStream = ResponseMapper.MapGetObjectResponseToOpenStream(sourceWithNullStream); + + Assert.IsNotNull(mappedWithNullStream, "Mapped response should not be null even with null stream"); + Assert.IsNull(mappedWithNullStream.ResponseStream, "ResponseStream should be null when source is null"); + Assert.AreEqual("test-etag-2", mappedWithNullStream.ETag, "Other properties should still be mapped"); + } + + [TestMethod] + [TestCategory("S3")] + public void MapGetObjectResponseToOpenStream_NullSource_ThrowsArgumentNullException() + { + Assert.ThrowsException(() => + ResponseMapper.MapGetObjectResponseToOpenStream(null), + "Mapping null source should throw ArgumentNullException"); + } + + [TestMethod] + [TestCategory("S3")] + public void TransferUtilityOpenStreamResponse_Dispose_DisposesResponseStream() + { + // Arrange + var memoryStream = new MemoryStream(new byte[] { 1, 2, 3, 4, 5 }); + var response = new TransferUtilityOpenStreamResponse + { + ResponseStream = memoryStream, + ETag = "test-etag" + }; + + // Act + response.Dispose(); + + // Assert - accessing disposed stream should throw ObjectDisposedException + Assert.ThrowsException(() => _ = memoryStream.Length, + "Accessing Length of disposed stream should throw ObjectDisposedException"); + Assert.ThrowsException(() => _ = memoryStream.Position, + "Accessing Position of disposed stream should throw ObjectDisposedException"); + Assert.ThrowsException(() => memoryStream.Read(new byte[1], 0, 1), + "Reading from disposed stream should throw ObjectDisposedException"); + Assert.IsNull(response.ResponseStream, "ResponseStream should be null after disposal"); + } + + [TestMethod] + [TestCategory("S3")] + public void TransferUtilityOpenStreamResponse_Dispose_MultipleCallsSafe() + { + // Arrange + var memoryStream = new MemoryStream(new byte[] { 1, 2, 3, 4, 5 }); + var response = new TransferUtilityOpenStreamResponse + { + ResponseStream = memoryStream + }; + + // Act - call dispose multiple times + response.Dispose(); + response.Dispose(); // Second call should not throw + + // Assert - stream should still be disposed after multiple dispose calls + Assert.ThrowsException(() => _ = memoryStream.Length, + "Stream should remain disposed after multiple dispose calls"); + Assert.ThrowsException(() => memoryStream.Read(new byte[1], 0, 1), + "Stream should remain disposed after multiple dispose calls"); + 
Assert.IsNull(response.ResponseStream, "ResponseStream should remain null after multiple dispose calls"); + } + + [TestMethod] + [TestCategory("S3")] + public void TransferUtilityOpenStreamResponse_Dispose_NullStreamSafe() + { + // Arrange + var response = new TransferUtilityOpenStreamResponse + { + ResponseStream = null, + ETag = "test-etag" + }; + + // Act & Assert - should not throw + response.Dispose(); + Assert.IsNull(response.ResponseStream, "ResponseStream should remain null"); + } + + [TestMethod] + [TestCategory("S3")] + public void TransferUtilityOpenStreamResponse_UsingStatement_DisposesCorrectly() + { + // Arrange + var memoryStream = new MemoryStream(new byte[] { 1, 2, 3, 4, 5 }); + MemoryStream capturedStream = null; + + // Act + using (var response = new TransferUtilityOpenStreamResponse()) + { + response.ResponseStream = memoryStream; + response.ETag = "test-etag"; + capturedStream = memoryStream; + } // Dispose should be called here + + // Assert - stream should be disposed after using block + Assert.ThrowsException(() => _ = capturedStream.Length, + "Stream should be disposed after using block"); + Assert.ThrowsException(() => capturedStream.Read(new byte[1], 0, 1), + "Stream should be disposed after using block"); + } + + [TestMethod] + [TestCategory("S3")] + public void TransferUtilityOpenStreamResponse_ImplementsIDisposable() + { + // Assert + Assert.IsTrue(typeof(IDisposable).IsAssignableFrom(typeof(TransferUtilityOpenStreamResponse)), + "TransferUtilityOpenStreamResponse should implement IDisposable"); + } + + /// /// Generates appropriate test data for a given property type /// From 15469e770baa6e4a80a407797f518fd539cc1302 Mon Sep 17 00:00:00 2001 From: Garrett Beatty Date: Thu, 6 Nov 2025 16:09:51 -0500 Subject: [PATCH 17/56] Fix Unit test (#4108) --- .../Custom/MultipartUploadValidationTests.cs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/sdk/test/Services/S3/UnitTests/Custom/MultipartUploadValidationTests.cs b/sdk/test/Services/S3/UnitTests/Custom/MultipartUploadValidationTests.cs index 19ea71304c8f..9f0a6e4dc024 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/MultipartUploadValidationTests.cs +++ b/sdk/test/Services/S3/UnitTests/Custom/MultipartUploadValidationTests.cs @@ -69,6 +69,18 @@ public async Task Validation_HappyPath() return new UploadPartResponse { PartNumber = request.PartNumber }; }); + s3Client + .Setup(x => x.CompleteMultipartUploadAsync( + It.IsAny(), + It.IsAny())) + .ReturnsAsync(new CompleteMultipartUploadResponse + { + BucketName = "test-bucket", + Key = "test", + ETag = "test-etag", + Location = "https://test-bucket.s3.amazonaws.com/test" + }); + var uploadRequest = new TransferUtilityUploadRequest { FilePath = _tempFilePath, From 7272f827d1a33d15deae41ee5c930d748ea47718 Mon Sep 17 00:00:00 2001 From: Garrett Beatty Date: Fri, 7 Nov 2025 09:38:50 -0500 Subject: [PATCH 18/56] Add UploadWithResponseAsync api (#4105) --- .../77d980ad-8f58-4f2e-97f8-d2c8c5ba3732.json | 11 + .../Internal/AbortMultipartUploadsCommand.cs | 2 +- .../Custom/Transfer/Internal/BaseCommand.cs | 11 +- .../Transfer/Internal/DownloadCommand.cs | 3 +- .../Internal/DownloadDirectoryCommand.cs | 2 +- .../Internal/MultipartUploadCommand.cs | 2 +- .../Transfer/Internal/OpenStreamCommand.cs | 7 +- .../Transfer/Internal/SimpleUploadCommand.cs | 2 +- .../Internal/UploadDirectoryCommand.cs | 2 +- .../AbortMultipartUploadsCommand.async.cs | 6 +- .../Internal/_async/BaseCommand.async.cs | 9 +- .../Internal/_async/DownloadCommand.async.cs | 7 +- 
.../_async/MultipartUploadCommand.async.cs | 10 +- .../_async/OpenStreamCommand.async.cs | 6 +- .../_async/SimpleUploadCommand.async.cs | 6 +- .../DownloadDirectoryCommand.cs | 6 +- .../UploadDirectoryCommand.cs | 6 +- .../S3/Custom/Transfer/TransferUtility.cs | 2 +- ...ferUtilityAbortMultipartUploadsResponse.cs | 35 +++ ...ransferUtilityDownloadDirectoryResponse.cs | 26 ++ .../TransferUtilityUploadDirectoryResponse.cs | 35 +++ .../Transfer/_async/TransferUtility.async.cs | 163 +++++++++++- .../_bcl+netstandard/TransferUtility.sync.cs | 148 +++++++++++ .../IntegrationTests/TransferUtilityTests.cs | 243 ++++++++++++++++++ 24 files changed, 710 insertions(+), 40 deletions(-) create mode 100644 generator/.DevConfigs/77d980ad-8f58-4f2e-97f8-d2c8c5ba3732.json create mode 100644 sdk/src/Services/S3/Custom/Transfer/TransferUtilityAbortMultipartUploadsResponse.cs create mode 100644 sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryResponse.cs create mode 100644 sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadDirectoryResponse.cs diff --git a/generator/.DevConfigs/77d980ad-8f58-4f2e-97f8-d2c8c5ba3732.json b/generator/.DevConfigs/77d980ad-8f58-4f2e-97f8-d2c8c5ba3732.json new file mode 100644 index 000000000000..be509aae4368 --- /dev/null +++ b/generator/.DevConfigs/77d980ad-8f58-4f2e-97f8-d2c8c5ba3732.json @@ -0,0 +1,11 @@ +{ + "services": [ + { + "serviceName": "S3", + "type": "minor", + "changeLogMessages": [ + "Create new UploadWithResponse API that returns response metadata information for transfer utility." + ] + } + ] +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/AbortMultipartUploadsCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/AbortMultipartUploadsCommand.cs index a0313c75b6c0..9bf25dc5421d 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/AbortMultipartUploadsCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/AbortMultipartUploadsCommand.cs @@ -28,7 +28,7 @@ namespace Amazon.S3.Transfer.Internal { - internal partial class AbortMultipartUploadsCommand : BaseCommand + internal partial class AbortMultipartUploadsCommand : BaseCommand { IAmazonS3 _s3Client; TransferUtilityAbortMultipartUploadRequest _request; diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/BaseCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/BaseCommand.cs index 428758fa54e6..71d2685e6fda 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/BaseCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/BaseCommand.cs @@ -30,13 +30,12 @@ namespace Amazon.S3.Transfer.Internal { - internal abstract partial class BaseCommand + /// + /// Generic base command that returns a typed response + /// + /// Type of response returned by the command + internal abstract partial class BaseCommand where TResponse : class { - public virtual object Return - { - get { return null; } - } - internal GetObjectRequest ConvertToGetObjectRequest(BaseDownloadRequest request) { GetObjectRequest getRequest = new GetObjectRequest() diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadCommand.cs index 6359704fc0cd..f8e45d7b20fe 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadCommand.cs @@ -33,7 +33,7 @@ namespace Amazon.S3.Transfer.Internal { - internal partial class DownloadCommand : BaseCommand + internal partial class DownloadCommand : BaseCommand { static int MAX_BACKOFF_IN_MILLISECONDS = 
(int)TimeSpan.FromSeconds(30).TotalMilliseconds;
@@ -176,4 +176,3 @@ static ByteRange ByteRangeRemainingForDownload(string filepath)
         }
     }
 }
-
diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadDirectoryCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadDirectoryCommand.cs
index 0140554ded39..5058960d9a06 100644
--- a/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadDirectoryCommand.cs
+++ b/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadDirectoryCommand.cs
@@ -33,7 +33,7 @@ namespace Amazon.S3.Transfer.Internal
 {
-    internal partial class DownloadDirectoryCommand : BaseCommand
+    internal partial class DownloadDirectoryCommand : BaseCommand<TransferUtilityDownloadDirectoryResponse>
     {
         private readonly IAmazonS3 _s3Client;
         private readonly TransferUtilityDownloadDirectoryRequest _request;
diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartUploadCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartUploadCommand.cs
index dca8f3f076ac..9c6374502885 100644
--- a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartUploadCommand.cs
+++ b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartUploadCommand.cs
@@ -37,7 +37,7 @@ namespace Amazon.S3.Transfer.Internal
     /// <summary>
     /// The command to manage an upload using the S3 multipart API.
     /// </summary>
-    internal partial class MultipartUploadCommand : BaseCommand
+    internal partial class MultipartUploadCommand : BaseCommand<TransferUtilityUploadResponse>
     {
         IAmazonS3 _s3Client;
         long _partSize;
diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/OpenStreamCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/OpenStreamCommand.cs
index 57eab52d3f98..0fdfc64bcbae 100644
--- a/sdk/src/Services/S3/Custom/Transfer/Internal/OpenStreamCommand.cs
+++ b/sdk/src/Services/S3/Custom/Transfer/Internal/OpenStreamCommand.cs
@@ -29,7 +29,7 @@ namespace Amazon.S3.Transfer.Internal
 {
-    internal partial class OpenStreamCommand : BaseCommand
+    internal partial class OpenStreamCommand : BaseCommand<TransferUtilityOpenStreamResponse>
     {
         IAmazonS3 _s3Client;
         TransferUtilityOpenStreamRequest _request;
@@ -59,10 +59,5 @@ internal Stream ResponseStream
         {
             get { return this._responseStream; }
         }
-
-        public override object Return
-        {
-            get { return this.ResponseStream; }
-        }
     }
 }
diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/SimpleUploadCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/SimpleUploadCommand.cs
index d8de23a6145b..799b36fcde28 100644
--- a/sdk/src/Services/S3/Custom/Transfer/Internal/SimpleUploadCommand.cs
+++ b/sdk/src/Services/S3/Custom/Transfer/Internal/SimpleUploadCommand.cs
@@ -36,7 +36,7 @@ namespace Amazon.S3.Transfer.Internal
     /// <summary>
     /// This command is for doing regular PutObject requests.
     /// </summary>
-    internal partial class SimpleUploadCommand : BaseCommand
+    internal partial class SimpleUploadCommand : BaseCommand<TransferUtilityUploadResponse>
     {
         IAmazonS3 _s3Client;
         TransferUtilityConfig _config;
diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/UploadDirectoryCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/UploadDirectoryCommand.cs
index e4be9b27aa74..693a9ef8325a 100644
--- a/sdk/src/Services/S3/Custom/Transfer/Internal/UploadDirectoryCommand.cs
+++ b/sdk/src/Services/S3/Custom/Transfer/Internal/UploadDirectoryCommand.cs
@@ -32,7 +32,7 @@ namespace Amazon.S3.Transfer.Internal
     /// This command files all the files that meets the criteria specified in the TransferUtilityUploadDirectoryRequest request
     /// and uploads them.
     /// </summary>
-    internal partial class UploadDirectoryCommand : BaseCommand
+    internal partial class UploadDirectoryCommand : BaseCommand<TransferUtilityUploadDirectoryResponse>
     {
         TransferUtilityUploadDirectoryRequest _request;
         TransferUtility _utility;
diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/AbortMultipartUploadsCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/AbortMultipartUploadsCommand.async.cs
index 4e55afcd34e8..c4ae5bb8b9e5 100644
--- a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/AbortMultipartUploadsCommand.async.cs
+++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/AbortMultipartUploadsCommand.async.cs
@@ -24,10 +24,10 @@ namespace Amazon.S3.Transfer.Internal
 {
-    internal partial class AbortMultipartUploadsCommand : BaseCommand
+    internal partial class AbortMultipartUploadsCommand : BaseCommand<TransferUtilityAbortMultipartUploadsResponse>
     {
-        public override async Task ExecuteAsync(CancellationToken cancellationToken)
+        public override async Task<TransferUtilityAbortMultipartUploadsResponse> ExecuteAsync(CancellationToken cancellationToken)
         {
             if (string.IsNullOrEmpty(this._request.BucketName))
             {
@@ -84,6 +84,8 @@ await asyncThrottler.WaitAsync(cancellationToken)
             await WhenAllOrFirstExceptionAsync(pendingTasks,cancellationToken)
                 .ConfigureAwait(continueOnCapturedContext: false);
+
+            return new TransferUtilityAbortMultipartUploadsResponse();
         }
         finally
         {
diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/BaseCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/BaseCommand.async.cs
index f9591f6d1d68..65ee8d8cb4c8 100644
--- a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/BaseCommand.async.cs
+++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/BaseCommand.async.cs
@@ -24,9 +24,12 @@ namespace Amazon.S3.Transfer.Internal
 {
-    internal abstract partial class BaseCommand
+    internal abstract partial class BaseCommand<TResponse> where TResponse : class
     {
-        public abstract Task ExecuteAsync(CancellationToken cancellationToken);
+        /// <summary>
+        /// Executes the command and returns a typed response
+        /// </summary>
+        public abstract Task<TResponse> ExecuteAsync(CancellationToken cancellationToken);
 
         /// <summary>
         /// Waits for all of the tasks to complete or till any task fails or is canceled.
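A minimal sketch of the fail-fast wait this doc comment describes, assuming a simple Task.WhenAny loop; the SDK's actual WhenAllOrFirstExceptionAsync may differ in details such as cancellation handling and cleanup.

```csharp
// Sketch: run all tasks to completion, but surface the first failure immediately.
static async Task WhenAllOrFirstExceptionSketchAsync(
    List<Task> pendingTasks, CancellationToken cancellationToken)
{
    var remaining = new List<Task>(pendingTasks);
    while (remaining.Count > 0)
    {
        cancellationToken.ThrowIfCancellationRequested();
        var finished = await Task.WhenAny(remaining).ConfigureAwait(false);
        remaining.Remove(finished);
        // Awaiting the finished task rethrows its exception (if any)
        // instead of waiting for the rest of the batch.
        await finished.ConfigureAwait(false);
    }
}
```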
@@ -80,7 +83,7 @@ await completedTask } } - protected static async Task ExecuteCommandAsync(BaseCommand command, CancellationTokenSource internalCts, SemaphoreSlim throttler) + protected static async Task ExecuteCommandAsync(BaseCommand command, CancellationTokenSource internalCts, SemaphoreSlim throttler) where T : class { try { diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/DownloadCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/DownloadCommand.async.cs index 3e536a4bb607..6baef9262774 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/DownloadCommand.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/DownloadCommand.async.cs @@ -28,9 +28,9 @@ namespace Amazon.S3.Transfer.Internal { - internal partial class DownloadCommand : BaseCommand + internal partial class DownloadCommand : BaseCommand { - public override async Task ExecuteAsync(CancellationToken cancellationToken) + public override async Task ExecuteAsync(CancellationToken cancellationToken) { ValidateRequest(); GetObjectRequest getRequest = ConvertToGetObjectRequest(this._request); @@ -130,6 +130,9 @@ await response.WriteResponseStreamToFileAsync(this._request.FilePath, true, canc } WaitBeforeRetry(retries); } while (shouldRetry); + + // TODO map and return response + return new TransferUtilityDownloadResponse(); } private static bool HandleExceptionForHttpClient(Exception exception, int retries, int maxRetries) diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartUploadCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartUploadCommand.async.cs index dd747e3d83e8..3f29336f0fe0 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartUploadCommand.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartUploadCommand.async.cs @@ -27,20 +27,20 @@ namespace Amazon.S3.Transfer.Internal { - internal partial class MultipartUploadCommand : BaseCommand + internal partial class MultipartUploadCommand : BaseCommand { public SemaphoreSlim AsyncThrottler { get; set; } Dictionary _expectedUploadParts = new Dictionary(); - public override async Task ExecuteAsync(CancellationToken cancellationToken) + public override async Task ExecuteAsync(CancellationToken cancellationToken) { // Fire transfer initiated event FIRST, before choosing path FireTransferInitiatedEvent(); if ( (this._fileTransporterRequest.InputStream != null && !this._fileTransporterRequest.InputStream.CanSeek) || this._fileTransporterRequest.ContentLength == -1) { - await UploadUnseekableStreamAsync(this._fileTransporterRequest, cancellationToken).ConfigureAwait(false); + return await UploadUnseekableStreamAsync(this._fileTransporterRequest, cancellationToken).ConfigureAwait(false); } else { @@ -144,6 +144,7 @@ await localThrottler.WaitAsync(cancellationToken) var mappedResponse = ResponseMapper.MapCompleteMultipartUploadResponse(completeResponse); FireTransferCompletedEvent(mappedResponse); + return mappedResponse; } catch (Exception e) { @@ -275,7 +276,7 @@ private void AbortMultipartUpload(string uploadId) Logger.InfoFormat("Error attempting to abort multipart for key {0}: {1}", this._fileTransporterRequest.Key, e.Message); } } - private async Task UploadUnseekableStreamAsync(TransferUtilityUploadRequest request, CancellationToken cancellationToken = default(CancellationToken)) + private async Task UploadUnseekableStreamAsync(TransferUtilityUploadRequest request, CancellationToken cancellationToken = 
default(CancellationToken)) { cancellationToken.ThrowIfCancellationRequested(); @@ -376,6 +377,7 @@ private void AbortMultipartUpload(string uploadId) var mappedResponse = ResponseMapper.MapCompleteMultipartUploadResponse(completeResponse); FireTransferCompletedEvent(mappedResponse); + return mappedResponse; } } catch (Exception ex) diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/OpenStreamCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/OpenStreamCommand.async.cs index 192560f837ee..8c954d256fab 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/OpenStreamCommand.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/OpenStreamCommand.async.cs @@ -24,14 +24,16 @@ namespace Amazon.S3.Transfer.Internal { - internal partial class OpenStreamCommand : BaseCommand + internal partial class OpenStreamCommand : BaseCommand { - public override async Task ExecuteAsync(CancellationToken cancellationToken) + public override async Task ExecuteAsync(CancellationToken cancellationToken) { var getRequest = ConstructRequest(); var response = await _s3Client.GetObjectAsync(getRequest, cancellationToken) .ConfigureAwait(continueOnCapturedContext: false); _responseStream = response.ResponseStream; + // TODO map and return response + return new TransferUtilityOpenStreamResponse(); } } } diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/SimpleUploadCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/SimpleUploadCommand.async.cs index 51680eaaba09..1d936f0bdf5c 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/SimpleUploadCommand.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/SimpleUploadCommand.async.cs @@ -24,11 +24,11 @@ namespace Amazon.S3.Transfer.Internal { - internal partial class SimpleUploadCommand : BaseCommand + internal partial class SimpleUploadCommand : BaseCommand { public SemaphoreSlim AsyncThrottler { get; set; } - public override async Task ExecuteAsync(CancellationToken cancellationToken) + public override async Task ExecuteAsync(CancellationToken cancellationToken) { try { @@ -47,6 +47,8 @@ await this.AsyncThrottler.WaitAsync(cancellationToken) var mappedResponse = ResponseMapper.MapPutObjectResponse(response); FireTransferCompletedEvent(mappedResponse); + + return mappedResponse; } catch (Exception) { diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs index a160bc1504f4..be3fb4f0ae33 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs @@ -25,7 +25,7 @@ namespace Amazon.S3.Transfer.Internal { - internal partial class DownloadDirectoryCommand : BaseCommand + internal partial class DownloadDirectoryCommand : BaseCommand { TransferUtilityConfig _config; @@ -38,7 +38,7 @@ internal DownloadDirectoryCommand(IAmazonS3 s3Client, TransferUtilityDownloadDir this._config = config; } - public override async Task ExecuteAsync(CancellationToken cancellationToken) + public override async Task ExecuteAsync(CancellationToken cancellationToken) { ValidateRequest(); EnsureDirectoryExists(new DirectoryInfo(this._request.LocalDirectory)); @@ -112,6 +112,8 @@ await asyncThrottler.WaitAsync(cancellationToken) } await WhenAllOrFirstExceptionAsync(pendingTasks, cancellationToken) 
.ConfigureAwait(continueOnCapturedContext: false); + + return new TransferUtilityDownloadDirectoryResponse(); } finally { diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/UploadDirectoryCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/UploadDirectoryCommand.cs index 75e1744d5435..10f09be9ed07 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/UploadDirectoryCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/UploadDirectoryCommand.cs @@ -23,11 +23,11 @@ namespace Amazon.S3.Transfer.Internal { - internal partial class UploadDirectoryCommand : BaseCommand + internal partial class UploadDirectoryCommand : BaseCommand { public bool UploadFilesConcurrently { get; set; } - public override async Task ExecuteAsync(CancellationToken cancellationToken) + public override async Task ExecuteAsync(CancellationToken cancellationToken) { string prefix = GetKeyPrefix(); @@ -87,6 +87,8 @@ await WhenAllOrFirstExceptionAsync(pendingTasks, cancellationToken) if (asyncThrottler != null) asyncThrottler.Dispose(); } + + return new TransferUtilityUploadDirectoryResponse(); } private Task GetFiles(string path, string searchPattern, SearchOption searchOption, CancellationToken cancellationToken) diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtility.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtility.cs index 472a5933ba28..f4dde2c232e2 100644 --- a/sdk/src/Services/S3/Custom/Transfer/TransferUtility.cs +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtility.cs @@ -386,7 +386,7 @@ private static TransferUtilityUploadRequest ConstructUploadRequest(Stream stream }; } - internal BaseCommand GetUploadCommand(TransferUtilityUploadRequest request) + internal BaseCommand GetUploadCommand(TransferUtilityUploadRequest request) { validate(request); diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityAbortMultipartUploadsResponse.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityAbortMultipartUploadsResponse.cs new file mode 100644 index 000000000000..6c63c4b0a75b --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityAbortMultipartUploadsResponse.cs @@ -0,0 +1,35 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ + +using Amazon.Runtime; + +namespace Amazon.S3.Transfer +{ + /// + /// Response object for Transfer Utility abort multipart uploads operations. + /// Contains response metadata from abort multipart uploads operations. 
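The typed response is an empty placeholder for now, but the underlying operation can already be exercised. A minimal sketch, assuming an existing IAmazonS3 client, a placeholder bucket name, and a seven-day cutoff:

```csharp
// Sketch: abort incomplete multipart uploads initiated before a cutoff,
// so orphaned parts stop accruing storage charges.
var transferUtility = new TransferUtility(s3Client); // s3Client: existing IAmazonS3
await transferUtility.AbortMultipartUploadsAsync(
    "amzn-s3-demo-bucket",          // placeholder bucket name
    DateTime.UtcNow.AddDays(-7));   // abort uploads older than a week
```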
+ /// + public class TransferUtilityAbortMultipartUploadsResponse + { + // Empty placeholder class - properties will be added in future iterations + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryResponse.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryResponse.cs new file mode 100644 index 000000000000..6df0c1c5a619 --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryResponse.cs @@ -0,0 +1,26 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +using Amazon.Runtime; + +namespace Amazon.S3.Transfer +{ + /// + /// Contains the details returned from a Transfer Utility download directory operation. + /// + public class TransferUtilityDownloadDirectoryResponse + { + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadDirectoryResponse.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadDirectoryResponse.cs new file mode 100644 index 000000000000..94f32558d1fb --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadDirectoryResponse.cs @@ -0,0 +1,35 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ + +using Amazon.Runtime; + +namespace Amazon.S3.Transfer +{ + /// + /// Response object for Transfer Utility upload directory operations. + /// Contains response metadata from upload directory operations. + /// + public class TransferUtilityUploadDirectoryResponse + { + // Empty placeholder class - properties will be added in future iterations + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/_async/TransferUtility.async.cs b/sdk/src/Services/S3/Custom/Transfer/_async/TransferUtility.async.cs index 35205ad93f3a..92307954b039 100644 --- a/sdk/src/Services/S3/Custom/Transfer/_async/TransferUtility.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/_async/TransferUtility.async.cs @@ -217,6 +217,167 @@ public partial class TransferUtility : ITransferUtility await command.ExecuteAsync(cancellationToken).ConfigureAwait(false); } } + + /// + /// Uploads the specified file and returns response metadata. + /// The object key is derived from the file's name. 
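A minimal call sketch for this overload; the bucket name and file path are placeholders, and the object key defaults to the file's name:

```csharp
// Sketch: upload a file and read metadata off the new typed response.
var transferUtility = new TransferUtility(s3Client); // s3Client: existing IAmazonS3
var response = await transferUtility.UploadWithResponseAsync(
    @"C:\data\report.pdf",     // placeholder path
    "amzn-s3-demo-bucket");    // placeholder bucket
Console.WriteLine($"ETag: {response.ETag}, VersionId: {response.VersionId}");
```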
+ /// Multiple threads are used to read the file and perform multiple uploads in parallel. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. + /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploadsAsync() to abort the incomplete multipart uploads. + /// + /// + /// For nonseekable streams or streams with an unknown length, TransferUtility will use multipart upload and buffer up to a part size in memory + /// until the final part is reached and complete the upload. The buffer for the multipart upload is controlled by S3Constants.MinPartSize + /// and the default value is 5 megabytes. You can also adjust the read buffer size(i.e.how many bytes to read before writing to the part buffer) + /// via the BufferSize property on the ClientConfig.The default value for this is 8192 bytes. + /// + /// + /// + /// The file path of the file to upload. + /// + /// + /// The target Amazon S3 bucket, that is, the name of the bucket to upload the file to. + /// + /// + /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. + /// + /// The task object representing the asynchronous operation with upload response metadata. + public async Task UploadWithResponseAsync(string filePath, string bucketName, CancellationToken cancellationToken = default(CancellationToken)) + { + var request = ConstructUploadRequest(filePath, bucketName); + return await UploadWithResponseAsync(request, cancellationToken).ConfigureAwait(false); + } + + /// + /// Uploads the specified file and returns response metadata. + /// Multiple threads are used to read the file and perform multiple uploads in parallel. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. + /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploadsAsync() to abort the incomplete multipart uploads. + /// + /// + /// For nonseekable streams or streams with an unknown length, TransferUtility will use multipart upload and buffer up to a part size in memory + /// until the final part is reached and complete the upload. The buffer for the multipart upload is controlled by S3Constants.MinPartSize + /// and the default value is 5 megabytes. You can also adjust the read buffer size(i.e.how many bytes to read before writing to the part buffer) + /// via the BufferSize property on the ClientConfig.The default value for this is 8192 bytes. 
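Since the remarks above tie buffering behavior to the part size, a sketch of overriding it on the request; names are placeholders, and values below S3's 5 MB minimum part size are invalid:

```csharp
// Sketch: raise the part size to reduce the number of UploadPart calls
// (and, for nonseekable streams, the size of the in-memory part buffer).
var uploadRequest = new TransferUtilityUploadRequest
{
    BucketName = "amzn-s3-demo-bucket",  // placeholder
    FilePath = @"C:\data\large.bin",     // placeholder
    Key = "large.bin",
    PartSize = 16 * 1024 * 1024          // 16 MB parts
};
var uploadResponse = await transferUtility.UploadWithResponseAsync(uploadRequest);
```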
+ /// + /// + /// + /// The file path of the file to upload. + /// + /// + /// The target Amazon S3 bucket, that is, the name of the bucket to upload the file to. + /// + /// + /// The key under which the Amazon S3 object is stored. + /// + /// + /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. + /// + /// The task object representing the asynchronous operation with upload response metadata. + public async Task UploadWithResponseAsync(string filePath, string bucketName, string key, CancellationToken cancellationToken = default(CancellationToken)) + { + var request = ConstructUploadRequest(filePath, bucketName, key); + return await UploadWithResponseAsync(request, cancellationToken).ConfigureAwait(false); + } + + /// + /// Uploads the contents of the specified stream and returns response metadata. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. + /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploadsAsync() to abort the incomplete multipart uploads. + /// + /// + /// For nonseekable streams or streams with an unknown length, TransferUtility will use multipart upload and buffer up to a part size in memory + /// until the final part is reached and complete the upload. The buffer for the multipart upload is controlled by S3Constants.MinPartSize + /// and the default value is 5 megabytes. You can also adjust the read buffer size(i.e.how many bytes to read before writing to the part buffer) + /// via the BufferSize property on the ClientConfig.The default value for this is 8192 bytes. + /// + /// + /// + /// The stream to read to obtain the content to upload. + /// + /// + /// The target Amazon S3 bucket, that is, the name of the bucket to upload the stream to. + /// + /// + /// The key under which the Amazon S3 object is stored. + /// + /// + /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. + /// + /// The task object representing the asynchronous operation with upload response metadata. + public async Task UploadWithResponseAsync(Stream stream, string bucketName, string key, CancellationToken cancellationToken = default(CancellationToken)) + { + var request = ConstructUploadRequest(stream, bucketName, key); + return await UploadWithResponseAsync(request, cancellationToken).ConfigureAwait(false); + } + + /// + /// Uploads the file or stream specified by the request and returns response metadata. + /// To track the progress of the upload, + /// add an event listener to the request's UploadProgressEvent. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. 
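For the stream overload documented above, a sketch with placeholder path and bucket; nonseekable streams fall back to the buffered multipart path the remarks describe:

```csharp
// Sketch: upload from a stream and capture the typed response.
using (var stream = File.OpenRead(@"C:\data\archive.zip")) // placeholder path
{
    var response = await transferUtility.UploadWithResponseAsync(
        stream, "amzn-s3-demo-bucket", "archive.zip");      // placeholder bucket
    Console.WriteLine($"ETag: {response.ETag}");
}
```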
+ /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploadsAsync() to abort the incomplete multipart uploads. + /// + /// + /// For nonseekable streams or streams with an unknown length, TransferUtility will use multipart upload and buffer up to a part size in memory + /// until the final part is reached and complete the upload. The part size buffer for the multipart upload is controlled by the partSize + /// specified on the TransferUtilityUploadRequest, and if none is specified it defaults to S3Constants.MinPartSize (5 megabytes). + /// You can also adjust the read buffer size (i.e. how many bytes to read before adding it to the + /// part buffer) via the BufferSize property on the ClientConfig. The default value for this is 8192 bytes. + /// + /// + /// + /// Contains all the parameters required to upload to Amazon S3. + /// + /// + /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. + /// + /// The task object representing the asynchronous operation with upload response metadata. + public async Task UploadWithResponseAsync(TransferUtilityUploadRequest request, CancellationToken cancellationToken = default(CancellationToken)) + { + using(CreateSpan(nameof(UploadWithResponseAsync), null, Amazon.Runtime.Telemetry.Tracing.SpanKind.CLIENT)) + { + CheckForBlockedArn(request.BucketName, "Upload"); + var command = GetUploadCommand(request, null); + return await command.ExecuteAsync(cancellationToken).ConfigureAwait(false); + } + } #endregion #region AbortMultipartUploads @@ -346,7 +507,7 @@ public partial class TransferUtility : ITransferUtility #endregion - internal BaseCommand GetUploadCommand(TransferUtilityUploadRequest request, SemaphoreSlim asyncThrottler) + internal BaseCommand GetUploadCommand(TransferUtilityUploadRequest request, SemaphoreSlim asyncThrottler) { validate(request); if (IsMultipartUpload(request)) diff --git a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.sync.cs b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.sync.cs index f1ff62ce820d..c4d99745e3da 100644 --- a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.sync.cs +++ b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.sync.cs @@ -285,6 +285,154 @@ public void Upload(TransferUtilityUploadRequest request) } } + /// + /// Uploads the specified file and returns response metadata. + /// The object key is derived from the file's name. + /// Multiple threads are used to read the file and perform multiple uploads in parallel. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. + /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. 
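Because this overload takes the full request object, progress can be observed through the existing UploadProgressEvent hook; a sketch with placeholder names:

```csharp
// Sketch: track progress while using the request-based overload.
var request = new TransferUtilityUploadRequest
{
    BucketName = "amzn-s3-demo-bucket",  // placeholder
    FilePath = @"C:\data\large.bin",     // placeholder
    Key = "large.bin"
};
request.UploadProgressEvent += (sender, args) =>
    Console.WriteLine($"{args.TransferredBytes}/{args.TotalBytes} bytes");
var response = await transferUtility.UploadWithResponseAsync(request);
```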
+ /// + /// + /// + /// The file path of the file to upload. + /// + /// + /// The target Amazon S3 bucket, that is, the name of the bucket to upload the file to. + /// + /// The upload response metadata. + public TransferUtilityUploadResponse UploadWithResponse(string filePath, string bucketName) + { + try + { + return UploadWithResponseAsync(filePath, bucketName).Result; + } + catch (AggregateException e) + { + ExceptionDispatchInfo.Capture(e.InnerException).Throw(); + return null; + } + } + + /// + /// Uploads the specified file and returns response metadata. + /// Multiple threads are used to read the file and perform multiple uploads in parallel. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. + /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. + /// + /// + /// + /// The file path of the file to upload. + /// + /// + /// The target Amazon S3 bucket, that is, the name of the bucket to upload the file to. + /// + /// + /// The key under which the Amazon S3 object is stored. + /// + /// The upload response metadata. + public TransferUtilityUploadResponse UploadWithResponse(string filePath, string bucketName, string key) + { + try + { + return UploadWithResponseAsync(filePath, bucketName, key).Result; + } + catch (AggregateException e) + { + ExceptionDispatchInfo.Capture(e.InnerException).Throw(); + return null; + } + } + + /// + /// Uploads the contents of the specified stream and returns response metadata. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. + /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. + /// + /// + /// + /// The stream to read to obtain the content to upload. + /// + /// + /// The target Amazon S3 bucket, that is, the name of the bucket to upload the stream to. + /// + /// + /// The key under which the Amazon S3 object is stored. + /// + /// The upload response metadata. + public TransferUtilityUploadResponse UploadWithResponse(Stream stream, string bucketName, string key) + { + try + { + return UploadWithResponseAsync(stream, bucketName, key).Result; + } + catch (AggregateException e) + { + ExceptionDispatchInfo.Capture(e.InnerException).Throw(); + return null; + } + } + + /// + /// Uploads the file or stream specified by the request and returns response metadata. 
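The synchronous wrappers block on the async implementation and unwrap the AggregateException, so callers see the original S3 exception rather than a wrapped one; a sketch with placeholder names:

```csharp
// Sketch: synchronous upload with the typed response.
try
{
    var response = transferUtility.UploadWithResponse(
        @"C:\data\report.pdf",          // placeholder path
        "amzn-s3-demo-bucket",          // placeholder bucket
        "reports/report.pdf");
    Console.WriteLine(response.ETag);
}
catch (AmazonS3Exception e)
{
    Console.WriteLine($"Upload failed: {e.Message}");
}
```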
+ /// To track the progress of the upload, + /// add an event listener to the request's UploadProgressEvent. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. + /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. + /// + /// + /// + /// Contains all the parameters required to upload to Amazon S3. + /// + /// The upload response metadata. + public TransferUtilityUploadResponse UploadWithResponse(TransferUtilityUploadRequest request) + { + try + { + return UploadWithResponseAsync(request).Result; + } + catch (AggregateException e) + { + ExceptionDispatchInfo.Capture(e.InnerException).Throw(); + return null; + } + } + #endregion #region OpenStream diff --git a/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs b/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs index 6aaa7ae3a5e3..967f07d4884f 100644 --- a/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs +++ b/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs @@ -13,6 +13,7 @@ using Amazon.Util; using System.Net.Mime; using System.Runtime.InteropServices.ComTypes; +using System.Threading.Tasks; namespace AWSSDK_DotNet.IntegrationTests.Tests.S3 { @@ -1433,6 +1434,248 @@ public void TestMultipartUploadWithSetContentTypeNotOverwritten() Assert.IsTrue(metadata.Headers.ContentType.Equals(MediaTypeNames.Text.Plain)); } + [TestMethod] + [TestCategory("S3")] + public async Task UploadWithResponseAsyncSmallFileTest() + { + var fileName = UtilityMethods.GenerateName(@"UploadWithResponseTest\SmallFile"); + var path = Path.Combine(BasePath, fileName); + var fileSize = 1 * MEG_SIZE; // Small file for single-part upload + UtilityMethods.GenerateFile(path, fileSize); + + using (var transferUtility = new TransferUtility(Client)) + { + var request = new TransferUtilityUploadRequest + { + BucketName = bucketName, + FilePath = path, + Key = fileName, + ContentType = octetStreamContentType + }; + + var response = await transferUtility.UploadWithResponseAsync(request); + + // Validate response object is not null + Assert.IsNotNull(response, "Response should not be null"); + + // Validate essential response fields that should always be present + Assert.IsNotNull(response.ETag, "ETag should not be null"); + Assert.IsTrue(response.ETag.Length > 0, "ETag should not be empty"); + + // For small files, we expect single-part upload behavior - ETag should be MD5 format (no quotes or dashes) + // ETag format varies, so we just ensure it's a valid non-empty string + Console.WriteLine($"ETag: {response.ETag}"); + Console.WriteLine($"VersionId: {response.VersionId}"); + + // Validate file was actually uploaded by checking metadata + var metadata = await Client.GetObjectMetadataAsync(new GetObjectMetadataRequest + { + BucketName = bucketName, + Key = fileName + }); + Assert.AreEqual(fileSize, metadata.ContentLength, "Uploaded file size should match original"); + Assert.AreEqual(response.ETag, 
metadata.ETag, "ETag from response should match object metadata"); + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task UploadWithResponseAsyncLargeFileTest() + { + var fileName = UtilityMethods.GenerateName(@"UploadWithResponseTest\LargeFile"); + var path = Path.Combine(BasePath, fileName); + var fileSize = 20 * MEG_SIZE; // Large file for multipart upload + UtilityMethods.GenerateFile(path, fileSize); + + using (var transferUtility = new TransferUtility(Client)) + { + var request = new TransferUtilityUploadRequest + { + BucketName = bucketName, + FilePath = path, + Key = fileName, + ContentType = octetStreamContentType + }; + + var response = await transferUtility.UploadWithResponseAsync(request); + + // Validate response object is not null + Assert.IsNotNull(response, "Response should not be null"); + + // Validate essential response fields that should always be present + Assert.IsNotNull(response.ETag, "ETag should not be null"); + Assert.IsTrue(response.ETag.Length > 0, "ETag should not be empty"); + + // For multipart uploads, ETag format is different (contains dashes) + // We just validate it's a valid string for now + Console.WriteLine($"ETag (multipart): {response.ETag}"); + Console.WriteLine($"VersionId: {response.VersionId}"); + + // Validate file was actually uploaded by checking metadata + var metadata = await Client.GetObjectMetadataAsync(new GetObjectMetadataRequest + { + BucketName = bucketName, + Key = fileName + }); + Assert.AreEqual(fileSize, metadata.ContentLength, "Uploaded file size should match original"); + Assert.AreEqual(response.ETag, metadata.ETag, "ETag from response should match object metadata"); + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task UploadWithResponseAsyncStreamTest() + { + var fileName = UtilityMethods.GenerateName(@"UploadWithResponseTest\StreamFile"); + var path = Path.Combine(BasePath, fileName); + var fileSize = 5 * MEG_SIZE; + UtilityMethods.GenerateFile(path, fileSize); + + using (var transferUtility = new TransferUtility(Client)) + using (var fileStream = File.OpenRead(path)) + { + var request = new TransferUtilityUploadRequest + { + BucketName = bucketName, + InputStream = fileStream, + Key = fileName, + ContentType = octetStreamContentType + }; + + var response = await transferUtility.UploadWithResponseAsync(request); + + // Validate response object is not null + Assert.IsNotNull(response, "Response should not be null"); + + // Validate essential response fields that should always be present + Assert.IsNotNull(response.ETag, "ETag should not be null"); + Assert.IsTrue(response.ETag.Length > 0, "ETag should not be empty"); + + Console.WriteLine($"ETag (stream): {response.ETag}"); + Console.WriteLine($"VersionId: {response.VersionId}"); + + // Validate file was actually streamed and uploaded correctly + var metadata = await Client.GetObjectMetadataAsync(new GetObjectMetadataRequest + { + BucketName = bucketName, + Key = fileName + }); + Assert.AreEqual(fileSize, metadata.ContentLength, "Uploaded stream size should match original"); + Assert.AreEqual(response.ETag, metadata.ETag, "ETag from response should match object metadata"); + + // Validate content by downloading and comparing + var downloadPath = path + ".download"; + await transferUtility.DownloadAsync(new TransferUtilityDownloadRequest + { + BucketName = bucketName, + Key = fileName, + FilePath = downloadPath + }); + UtilityMethods.CompareFiles(path, downloadPath); + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task 
UploadWithResponseAsyncWithChecksumTest() + { + var fileName = UtilityMethods.GenerateName(@"UploadWithResponseTest\ChecksumFile"); + var path = Path.Combine(BasePath, fileName); + var fileSize = 2 * MEG_SIZE; + UtilityMethods.GenerateFile(path, fileSize); + + // Calculate checksum for the file + var fileBytes = File.ReadAllBytes(path); + var precalculatedChecksum = CryptoUtilFactory.CryptoInstance.ComputeCRC32Hash(fileBytes); + + using (var transferUtility = new TransferUtility(Client)) + { + var request = new TransferUtilityUploadRequest + { + BucketName = bucketName, + FilePath = path, + Key = fileName, + ContentType = octetStreamContentType, + ChecksumCRC32 = precalculatedChecksum + }; + + var response = await transferUtility.UploadWithResponseAsync(request); + + // Validate response object is not null + Assert.IsNotNull(response, "Response should not be null"); + + // Validate essential response fields + Assert.IsNotNull(response.ETag, "ETag should not be null"); + Assert.IsTrue(response.ETag.Length > 0, "ETag should not be empty"); + + // Validate checksum fields if they should be present + // Note: Checksum fields in response may not always be set depending on S3 behavior + Console.WriteLine($"ETag: {response.ETag}"); + Console.WriteLine($"ChecksumCRC32: {response.ChecksumCRC32}"); + Console.WriteLine($"ChecksumType: {response.ChecksumType}"); + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task UploadWithResponseAsyncCompareWithLegacyUploadTest() + { + var fileName = UtilityMethods.GenerateName(@"UploadWithResponseTest\CompareFile"); + var path = Path.Combine(BasePath, fileName); + var fileSize = 8 * MEG_SIZE; + UtilityMethods.GenerateFile(path, fileSize); + + using (var transferUtility = new TransferUtility(Client)) + { + // Test the new UploadWithResponseAsync method + var responseRequest = new TransferUtilityUploadRequest + { + BucketName = bucketName, + FilePath = path, + Key = fileName + "-with-response", + ContentType = octetStreamContentType + }; + + var response = await transferUtility.UploadWithResponseAsync(responseRequest); + + // Test the legacy Upload method for comparison + var legacyRequest = new TransferUtilityUploadRequest + { + BucketName = bucketName, + FilePath = path, + Key = fileName + "-legacy", + ContentType = octetStreamContentType + }; + + await transferUtility.UploadAsync(legacyRequest); + + // Validate that both uploads resulted in the same file being uploaded + var responseMetadata = await Client.GetObjectMetadataAsync(new GetObjectMetadataRequest + { + BucketName = bucketName, + Key = fileName + "-with-response" + }); + + var legacyMetadata = await Client.GetObjectMetadataAsync(new GetObjectMetadataRequest + { + BucketName = bucketName, + Key = fileName + "-legacy" + }); + + // Both should have the same file size and content type + Assert.AreEqual(responseMetadata.ContentLength, legacyMetadata.ContentLength, "File sizes should match"); + Assert.AreEqual(responseMetadata.Headers.ContentType, legacyMetadata.Headers.ContentType, "Content types should match"); + + // Validate the response contains the expected ETag + Assert.IsNotNull(response.ETag, "Response ETag should not be null"); + Assert.AreEqual(response.ETag, responseMetadata.ETag, "Response ETag should match metadata ETag"); + + Console.WriteLine($"UploadWithResponseAsync ETag: {response.ETag}"); + Console.WriteLine($"Legacy upload ETag: {legacyMetadata.ETag}"); + Console.WriteLine($"File size: {fileSize}, Response metadata size: {responseMetadata.ContentLength}"); + } + } + #if 
ASYNC_AWAIT [TestMethod] From 3266894dfbdfd225486f83aa5125cb8b5adf8ac8 Mon Sep 17 00:00:00 2001 From: Garrett Beatty Date: Fri, 7 Nov 2025 09:41:09 -0500 Subject: [PATCH 19/56] Add DownloadInitiated, Failed and Completed events (#4079) --- .../9d07dc1e-d82d-4f94-8700-c7b57f872123.json | 11 + .../S3/Custom/Model/GetObjectResponse.cs | 6 + .../Transfer/Internal/DownloadCommand.cs | 34 +++ .../Internal/_async/DownloadCommand.async.cs | 25 +- .../TransferUtilityDownloadRequest.cs | 249 ++++++++++++++++++ .../IntegrationTests/TransferUtilityTests.cs | 185 +++++++++++++ 6 files changed, 508 insertions(+), 2 deletions(-) create mode 100644 generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872123.json diff --git a/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872123.json b/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872123.json new file mode 100644 index 000000000000..1838e718b587 --- /dev/null +++ b/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872123.json @@ -0,0 +1,11 @@ +{ + "services": [ + { + "serviceName": "S3", + "type": "minor", + "changeLogMessages": [ + "Added DownloadInitiatedEvent, DownloadCompletedEvent, and DownloadFailedEvent for TransferUtility Download." + ] + } + ] +} \ No newline at end of file diff --git a/sdk/src/Services/S3/Custom/Model/GetObjectResponse.cs b/sdk/src/Services/S3/Custom/Model/GetObjectResponse.cs index bae8fc4147b5..44c3eaddc6fe 100644 --- a/sdk/src/Services/S3/Custom/Model/GetObjectResponse.cs +++ b/sdk/src/Services/S3/Custom/Model/GetObjectResponse.cs @@ -25,6 +25,7 @@ using Amazon.S3.Model.Internal.MarshallTransformations; using Amazon.S3; using Amazon.Runtime.Internal; +using Amazon.S3.Transfer; namespace Amazon.S3.Model { @@ -1042,5 +1043,10 @@ internal WriteObjectProgressArgs(string bucketName, string key, string filePath, /// True if writing is complete /// public bool IsCompleted { get; private set; } + + /// + /// The original TransferUtilityDownloadRequest created by the user. 
+ /// + public TransferUtilityDownloadRequest Request { get; internal set; } } } diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadCommand.cs index f8e45d7b20fe..bca43c615b05 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadCommand.cs @@ -62,6 +62,34 @@ static Logger Logger IAmazonS3 _s3Client; TransferUtilityDownloadRequest _request; + long _totalTransferredBytes; + + #region Event Firing Methods + + private void FireTransferInitiatedEvent() + { + var transferInitiatedEventArgs = new DownloadInitiatedEventArgs(_request, _request.FilePath); + _request.OnRaiseTransferInitiatedEvent(transferInitiatedEventArgs); + } + + private void FireTransferCompletedEvent(TransferUtilityDownloadResponse response, string filePath, long transferredBytes, long totalBytes) + { + var transferCompletedEventArgs = new DownloadCompletedEventArgs( + _request, + response, + filePath, + transferredBytes, + totalBytes); + _request.OnRaiseTransferCompletedEvent(transferCompletedEventArgs); + } + + private void FireTransferFailedEvent(string filePath, long transferredBytes, long totalBytes = -1) + { + var eventArgs = new DownloadFailedEventArgs(this._request, filePath, transferredBytes, totalBytes); + this._request.OnRaiseTransferFailedEvent(eventArgs); + } + + #endregion internal DownloadCommand(IAmazonS3 s3Client, TransferUtilityDownloadRequest request) { @@ -89,6 +117,12 @@ private void ValidateRequest() void OnWriteObjectProgressEvent(object sender, WriteObjectProgressArgs e) { + // Keep track of the total transferred bytes so that we can also return this value in case of failure + Interlocked.Add(ref _totalTransferredBytes, e.IncrementTransferred); + + // Set the Request property to enable access to the original download request + e.Request = this._request; + this._request.OnRaiseProgressEvent(e); } diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/DownloadCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/DownloadCommand.async.cs index 6baef9262774..1f0d689b96e8 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/DownloadCommand.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/DownloadCommand.async.cs @@ -33,12 +33,17 @@ internal partial class DownloadCommand : BaseCommand ExecuteAsync(CancellationToken cancellationToken) { ValidateRequest(); + + FireTransferInitiatedEvent(); + GetObjectRequest getRequest = ConvertToGetObjectRequest(this._request); var maxRetries = _s3Client.Config.MaxErrorRetry; var retries = 0; bool shouldRetry = false; string mostRecentETag = null; + TransferUtilityDownloadResponse lastSuccessfulMappedResponse = null; + long? 
totalBytesFromResponse = null; // Track total bytes once we have response headers do { shouldRetry = false; @@ -54,12 +59,16 @@ public override async Task ExecuteAsync(Cancell using (var response = await this._s3Client.GetObjectAsync(getRequest, cancellationToken) .ConfigureAwait(continueOnCapturedContext: false)) { + // Capture total bytes from response headers as soon as we get them + totalBytesFromResponse = response.ContentLength; + if (!string.IsNullOrEmpty(mostRecentETag) && !string.Equals(mostRecentETag, response.ETag)) { //if the eTag changed, we need to retry from the start of the file mostRecentETag = response.ETag; getRequest.ByteRange = null; retries = 0; + Interlocked.Exchange(ref _totalTransferredBytes, 0); shouldRetry = true; WaitBeforeRetry(retries); continue; @@ -101,6 +110,8 @@ await response.WriteResponseStreamToFileAsync(this._request.FilePath, false, can await response.WriteResponseStreamToFileAsync(this._request.FilePath, true, cancellationToken) .ConfigureAwait(continueOnCapturedContext: false); } + + lastSuccessfulMappedResponse = ResponseMapper.MapGetObjectResponse(response); } } catch (Exception exception) @@ -109,6 +120,9 @@ await response.WriteResponseStreamToFileAsync(this._request.FilePath, true, canc shouldRetry = HandleExceptionForHttpClient(exception, retries, maxRetries); if (!shouldRetry) { + // Pass total bytes if we have them from response headers, otherwise -1 for unknown + FireTransferFailedEvent(this._request.FilePath, Interlocked.Read(ref _totalTransferredBytes), totalBytesFromResponse ?? -1); + if (exception is IOException) { throw; @@ -130,9 +144,16 @@ await response.WriteResponseStreamToFileAsync(this._request.FilePath, true, canc } WaitBeforeRetry(retries); } while (shouldRetry); + + // This should never happen under normal logic flow since we always throw exception on error. + if (lastSuccessfulMappedResponse == null) + { + throw new InvalidOperationException("Download completed without any successful response. This indicates a logical error in the retry handling."); + } + + FireTransferCompletedEvent(lastSuccessfulMappedResponse, this._request.FilePath, Interlocked.Read(ref _totalTransferredBytes), totalBytesFromResponse ?? -1); - // TODO map and return response - return new TransferUtilityDownloadResponse(); + return lastSuccessfulMappedResponse; } private static bool HandleExceptionForHttpClient(Exception exception, int retries, int maxRetries) diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadRequest.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadRequest.cs index d9a4bc5c7119..f7ba5f97b943 100644 --- a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadRequest.cs +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadRequest.cs @@ -90,5 +90,254 @@ internal void OnRaiseProgressEvent(WriteObjectProgressArgs progressArgs) { AWSSDKUtils.InvokeInBackground(WriteObjectProgressEvent, progressArgs, this); } + + /// + /// The event for DownloadInitiatedEvent notifications. All + /// subscribers will be notified when a download transfer operation + /// starts. + /// + /// The DownloadInitiatedEvent is fired exactly once when + /// a download transfer operation begins. The delegates attached to the event + /// will be passed information about the download request and + /// file path, but no progress information. + /// + /// + /// + /// Subscribe to this event if you want to receive + /// DownloadInitiatedEvent notifications. Here is how:
+ /// 1. Define a method with a signature similar to this one: + /// + /// private void downloadStarted(object sender, DownloadInitiatedEventArgs args) + /// { + /// Console.WriteLine($"Download started: {args.FilePath}"); + /// Console.WriteLine($"Bucket: {args.Request.BucketName}"); + /// Console.WriteLine($"Key: {args.Request.Key}"); + /// } + /// + /// 2. Add this method to the DownloadInitiatedEvent delegate's invocation list + /// + /// TransferUtilityDownloadRequest request = new TransferUtilityDownloadRequest(); + /// request.DownloadInitiatedEvent += downloadStarted; + /// + ///
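A lambda subscription works equally well here. A minimal sketch, assuming the event is declared as EventHandler<DownloadInitiatedEventArgs> and keeping in mind that handlers are dispatched on a background thread via AWSSDKUtils.InvokeInBackground; the bucket, key, and file path are illustrative only:

    using System;
    using Amazon.S3.Transfer;

    var request = new TransferUtilityDownloadRequest
    {
        BucketName = "amzn-s3-demo-bucket",   // hypothetical bucket
        Key = "reports/report.pdf",           // hypothetical key
        FilePath = @"C:\temp\report.pdf"      // hypothetical local path
    };
    // Runs on a background thread once the download begins.
    request.DownloadInitiatedEvent += (sender, args) =>
        Console.WriteLine($"Download starting: {args.Request.Key} -> {args.FilePath}");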
+ public event EventHandler DownloadInitiatedEvent; + + /// + /// The event for DownloadCompletedEvent notifications. All + /// subscribers will be notified when a download transfer operation + /// completes successfully. + /// + /// The DownloadCompletedEvent is fired exactly once when + /// a download transfer operation completes successfully. The delegates attached to the event + /// will be passed information about the completed download including + /// the final response from S3 with ETag, VersionId, and other metadata. + /// + /// + /// + /// Subscribe to this event if you want to receive + /// DownloadCompletedEvent notifications. Here is how:
+ /// 1. Define a method with a signature similar to this one: + /// + /// private void downloadCompleted(object sender, DownloadCompletedEventArgs args) + /// { + /// Console.WriteLine($"Download completed: {args.FilePath}"); + /// Console.WriteLine($"Transferred: {args.TransferredBytes} bytes"); + /// Console.WriteLine($"ETag: {args.Response.ETag}"); + /// Console.WriteLine($"S3 Key: {args.Response.Key}"); + /// Console.WriteLine($"Version ID: {args.Response.VersionId}"); + /// } + /// + /// 2. Add this method to the DownloadCompletedEvent delegate's invocation list + /// + /// TransferUtilityDownloadRequest request = new TransferUtilityDownloadRequest(); + /// request.DownloadCompletedEvent += downloadCompleted; + /// + ///
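Because OnRaiseTransferCompletedEvent dispatches handlers in the background (via AWSSDKUtils.InvokeInBackground, shown further down), the completion callback can fire after DownloadAsync has already returned. A sketch, assuming EventHandler<DownloadCompletedEventArgs> and a request/transferUtility pair already in scope, that bridges the event to a TaskCompletionSource so a caller can await it deterministically:

    using System;
    using System.Threading.Tasks;
    using Amazon.S3.Transfer;

    var completed = new TaskCompletionSource<DownloadCompletedEventArgs>(
        TaskCreationOptions.RunContinuationsAsynchronously);
    request.DownloadCompletedEvent += (sender, args) => completed.TrySetResult(args);

    await transferUtility.DownloadAsync(request);
    var result = await completed.Task;  // observe the background-thread handler safely
    Console.WriteLine($"{result.TransferredBytes}/{result.TotalBytes} bytes, ETag {result.Response.ETag}");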
+ public event EventHandler DownloadCompletedEvent; + + /// + /// The event for DownloadFailedEvent notifications. All + /// subscribers will be notified when a download transfer operation + /// fails. + /// + /// The DownloadFailedEvent is fired exactly once when + /// a download transfer operation fails. The delegates attached to the event + /// will be passed information about the failed download including + /// partial progress information, but no response data since the download failed. + /// + /// + /// + /// Subscribe to this event if you want to receive + /// DownloadFailedEvent notifications. Here is how:
+ /// 1. Define a method with a signature similar to this one: + /// + /// private void downloadFailed(object sender, DownloadFailedEventArgs args) + /// { + /// Console.WriteLine($"Download failed: {args.FilePath}"); + /// Console.WriteLine($"Partial progress: {args.TransferredBytes} bytes"); + /// Console.WriteLine($"Bucket: {args.Request.BucketName}"); + /// Console.WriteLine($"Key: {args.Request.Key}"); + /// } + /// + /// 2. Add this method to the DownloadFailedEvent delegate's invocation list + /// + /// TransferUtilityDownloadRequest request = new TransferUtilityDownloadRequest(); + /// request.DownloadFailedEvent += downloadFailed; + /// + ///
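As documented on DownloadFailedEventArgs below, TotalBytes is -1 whenever the failure occurs before response headers arrive, so handlers should branch on that sentinel. A short sketch, with the event type assumed to be EventHandler<DownloadFailedEventArgs>:

    request.DownloadFailedEvent += (sender, args) =>
    {
        if (args.TotalBytes < 0)
            Console.WriteLine($"Failed before headers arrived; {args.TransferredBytes} bytes written so far.");
        else
            Console.WriteLine($"Failed mid-transfer: {args.TransferredBytes} of {args.TotalBytes} bytes.");
    };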
+ public event EventHandler DownloadFailedEvent; + + /// + /// Causes the DownloadInitiatedEvent event to be fired. + /// + /// DownloadInitiatedEventArgs args + internal void OnRaiseTransferInitiatedEvent(DownloadInitiatedEventArgs args) + { + AWSSDKUtils.InvokeInBackground(DownloadInitiatedEvent, args, this); + } + + /// + /// Causes the DownloadCompletedEvent event to be fired. + /// + /// DownloadCompletedEventArgs args + internal void OnRaiseTransferCompletedEvent(DownloadCompletedEventArgs args) + { + AWSSDKUtils.InvokeInBackground(DownloadCompletedEvent, args, this); + } + + /// + /// Causes the DownloadFailedEvent event to be fired. + /// + /// DownloadFailedEventArgs args + internal void OnRaiseTransferFailedEvent(DownloadFailedEventArgs args) + { + AWSSDKUtils.InvokeInBackground(DownloadFailedEvent, args, this); + } + } + + /// + /// Encapsulates the information needed when a download transfer operation is initiated. + /// Provides access to the original request without progress or total byte information. + /// + public class DownloadInitiatedEventArgs : EventArgs + { + /// + /// Initializes a new instance of the DownloadInitiatedEventArgs class. + /// + /// The original TransferUtilityDownloadRequest created by the user + /// The file being downloaded + internal DownloadInitiatedEventArgs(TransferUtilityDownloadRequest request, string filePath) + { + Request = request; + FilePath = filePath; + } + + /// + /// The original TransferUtilityDownloadRequest created by the user. + /// Contains all the download parameters and configuration. + /// + public TransferUtilityDownloadRequest Request { get; private set; } + + /// + /// Gets the file being downloaded. + /// + public string FilePath { get; private set; } + } + + /// + /// Encapsulates the information needed when a download transfer operation completes successfully. + /// Provides access to the original request, final response, and completion details. + /// + public class DownloadCompletedEventArgs : EventArgs + { + /// + /// Initializes a new instance of the DownloadCompletedEventArgs class. + /// + /// The original TransferUtilityDownloadRequest created by the user + /// The unified response from Transfer Utility + /// The file being downloaded + /// The total number of bytes transferred + /// The total number of bytes for the complete file + internal DownloadCompletedEventArgs(TransferUtilityDownloadRequest request, TransferUtilityDownloadResponse response, string filePath, long transferredBytes, long totalBytes) + { + Request = request; + Response = response; + FilePath = filePath; + TransferredBytes = transferredBytes; + TotalBytes = totalBytes; + } + + /// + /// The original TransferUtilityDownloadRequest created by the user. + /// Contains all the download parameters and configuration. + /// + public TransferUtilityDownloadRequest Request { get; private set; } + + /// + /// The unified response from Transfer Utility after successful download completion. + /// Contains mapped fields from GetObjectResponse. + /// + public TransferUtilityDownloadResponse Response { get; private set; } + + /// + /// Gets the file being downloaded. + /// + public string FilePath { get; private set; } + + /// + /// Gets the total number of bytes that were successfully transferred. + /// + public long TransferredBytes { get; private set; } + + /// + /// Gets the total number of bytes for the complete file. 
+ /// + public long TotalBytes { get; private set; } + } + + /// + /// Encapsulates the information needed when a download transfer operation fails. + /// Provides access to the original request and partial progress information. + /// + public class DownloadFailedEventArgs : EventArgs + { + /// + /// Initializes a new instance of the DownloadFailedEventArgs class. + /// + /// The original TransferUtilityDownloadRequest created by the user + /// The file being downloaded + /// The number of bytes transferred before failure + /// The total number of bytes for the complete file, or -1 if unknown + internal DownloadFailedEventArgs(TransferUtilityDownloadRequest request, string filePath, long transferredBytes, long totalBytes) + { + Request = request; + FilePath = filePath; + TransferredBytes = transferredBytes; + TotalBytes = totalBytes; + } + + /// + /// The original TransferUtilityDownloadRequest created by the user. + /// Contains all the download parameters and configuration. + /// + public TransferUtilityDownloadRequest Request { get; private set; } + + /// + /// Gets the file being downloaded. + /// + public string FilePath { get; private set; } + + /// + /// Gets the number of bytes that were transferred before the failure occurred. + /// + public long TransferredBytes { get; private set; } + + /// + /// Gets the total number of bytes for the complete file, or -1 if unknown. + /// This will be -1 for failures that occur before receiving the GetObjectResponse + /// (e.g., authentication errors, non-existent objects), and will contain the actual + /// file size for failures that occur after receiving response headers (e.g., disk full). + /// + public long TotalBytes { get; private set; } } } diff --git a/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs b/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs index 967f07d4884f..0cb09b347c74 100644 --- a/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs +++ b/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs @@ -1320,6 +1320,112 @@ public void DownloadProgressZeroLengthFileTest() progressValidator.AssertOnCompletion(); } + [TestMethod] + [TestCategory("S3")] + public void SimpleDownloadInitiatedEventTest() + { + var fileName = UtilityMethods.GenerateName(@"SimpleDownloadTest\InitiatedEvent"); + var eventValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.AreEqual(args.FilePath, Path.Combine(BasePath, fileName + ".download")); + // Note: DownloadInitiatedEventArgs does not have TotalBytes since we don't know the size until GetObjectResponse + } + }; + DownloadWithLifecycleEvents(fileName, 10 * MEG_SIZE, eventValidator, null, null); + eventValidator.AssertEventFired(); + } + + [TestMethod] + [TestCategory("S3")] + public void SimpleDownloadCompletedEventTest() + { + var fileName = UtilityMethods.GenerateName(@"SimpleDownloadTest\CompletedEvent"); + var eventValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.IsNotNull(args.Response); + Assert.AreEqual(args.TransferredBytes, args.TotalBytes); + Assert.AreEqual(10 * MEG_SIZE, args.TotalBytes); + Assert.IsTrue(!string.IsNullOrEmpty(args.Response.ETag)); + Assert.AreEqual(args.FilePath, Path.Combine(BasePath, fileName + ".download")); + } + }; + DownloadWithLifecycleEvents(fileName, 10 * MEG_SIZE, null, eventValidator, null); + eventValidator.AssertEventFired(); + } + + [TestMethod] + [TestCategory("S3")] + public 
void SimpleDownloadFailedEventTest() + { + var fileName = UtilityMethods.GenerateName(@"SimpleDownloadTest\FailedEvent"); + var eventValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.AreEqual(args.FilePath, Path.Combine(BasePath, fileName + ".download")); + + // Non-existent key should always be early failure with unknown total bytes + Assert.AreEqual(-1, args.TotalBytes, "Non-existent key should result in TotalBytes = -1"); + Assert.AreEqual(0, args.TransferredBytes, "No bytes should be transferred for non-existent key"); + } + }; + + // Use non-existent key to force failure + var nonExistentKey = "non-existent-key-" + Guid.NewGuid().ToString(); + + try + { + DownloadWithLifecycleEventsAndKey(fileName, nonExistentKey, null, null, eventValidator); + Assert.Fail("Expected an exception to be thrown for non-existent key"); + } + catch (AmazonS3Exception) + { + // Expected exception - the failed event should have been fired + eventValidator.AssertEventFired(); + } + } + + [TestMethod] + [TestCategory("S3")] + public void SimpleDownloadCompleteLifecycleTest() + { + var fileName = UtilityMethods.GenerateName(@"SimpleDownloadTest\CompleteLifecycle"); + + var initiatedValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.AreEqual(args.FilePath, Path.Combine(BasePath, fileName + ".download")); + // Note: DownloadInitiatedEventArgs does not have TotalBytes since we don't know the size until GetObjectResponse + } + }; + + var completedValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.IsNotNull(args.Response); + Assert.AreEqual(args.TransferredBytes, args.TotalBytes); + Assert.AreEqual(8 * MEG_SIZE, args.TotalBytes); + Assert.AreEqual(args.FilePath, Path.Combine(BasePath, fileName + ".download")); + } + }; + + DownloadWithLifecycleEvents(fileName, 8 * MEG_SIZE, initiatedValidator, completedValidator, null); + + initiatedValidator.AssertEventFired(); + completedValidator.AssertEventFired(); + } + void Download(string fileName, long size, TransferProgressValidator progressValidator) { var key = fileName; @@ -2121,6 +2227,85 @@ void UploadUnseekableStreamWithLifecycleEventsAndBucket(long size, string target transferUtility.Upload(request); } + + void DownloadWithLifecycleEvents(string fileName, long size, + TransferLifecycleEventValidator initiatedValidator, + TransferLifecycleEventValidator completedValidator, + TransferLifecycleEventValidator failedValidator) + { + // First upload the file so we have something to download + var key = fileName; + var originalFilePath = Path.Combine(BasePath, fileName); + UtilityMethods.GenerateFile(originalFilePath, size); + + Client.PutObject(new PutObjectRequest + { + BucketName = bucketName, + Key = key, + FilePath = originalFilePath + }); + + var downloadedFilePath = originalFilePath + ".download"; + + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityDownloadRequest + { + BucketName = bucketName, + FilePath = downloadedFilePath, + Key = key + }; + + if (initiatedValidator != null) + { + request.DownloadInitiatedEvent += initiatedValidator.OnEventFired; + } + + if (completedValidator != null) + { + request.DownloadCompletedEvent += completedValidator.OnEventFired; + } + + if (failedValidator != null) + { + request.DownloadFailedEvent += failedValidator.OnEventFired; + } + + transferUtility.Download(request); + } + + void 
DownloadWithLifecycleEventsAndKey(string fileName, string keyToDownload, + TransferLifecycleEventValidator initiatedValidator, + TransferLifecycleEventValidator completedValidator, + TransferLifecycleEventValidator failedValidator) + { + var downloadedFilePath = Path.Combine(BasePath, fileName + ".download"); + + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityDownloadRequest + { + BucketName = bucketName, + FilePath = downloadedFilePath, + Key = keyToDownload + }; + + if (initiatedValidator != null) + { + request.DownloadInitiatedEvent += initiatedValidator.OnEventFired; + } + + if (completedValidator != null) + { + request.DownloadCompletedEvent += completedValidator.OnEventFired; + } + + if (failedValidator != null) + { + request.DownloadFailedEvent += failedValidator.OnEventFired; + } + + transferUtility.Download(request); + } + private class UnseekableStream : MemoryStream { private readonly bool _setZeroLengthStream; From 6e1ebab71bb98724a2be7154c33fa584fa98c705 Mon Sep 17 00:00:00 2001 From: Garrett Beatty Date: Tue, 11 Nov 2025 12:09:24 -0500 Subject: [PATCH 20/56] Populate TransferUtilityDownloadDirectoryResponse with total objects downloaded (#4109) --- .../c49077d9-90b3-437f-b316-6d8d8833ae73.json | 11 +++++++++++ .../_bcl+netstandard/DownloadDirectoryCommand.cs | 6 +++++- .../TransferUtilityDownloadDirectoryResponse.cs | 4 ++++ 3 files changed, 20 insertions(+), 1 deletion(-) create mode 100644 generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae73.json diff --git a/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae73.json b/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae73.json new file mode 100644 index 000000000000..3a1b9218e539 --- /dev/null +++ b/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae73.json @@ -0,0 +1,11 @@ +{ + "services": [ + { + "serviceName": "S3", + "type": "patch", + "changeLogMessages": [ + "Populate TransferUtilityDownloadDirectoryResponse with total objects downloaded" + ] + } + ] +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs index be3fb4f0ae33..9382ab33b757 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs @@ -108,12 +108,16 @@ await asyncThrottler.WaitAsync(cancellationToken) var command = new DownloadCommand(this._s3Client, downloadRequest); var task = ExecuteCommandAsync(command, internalCts, asyncThrottler); + pendingTasks.Add(task); } await WhenAllOrFirstExceptionAsync(pendingTasks, cancellationToken) .ConfigureAwait(continueOnCapturedContext: false); - return new TransferUtilityDownloadDirectoryResponse(); + return new TransferUtilityDownloadDirectoryResponse + { + ObjectsDownloaded = _numberOfFilesDownloaded + }; } finally { diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryResponse.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryResponse.cs index 6df0c1c5a619..098087e26143 100644 --- a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryResponse.cs +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryResponse.cs @@ -22,5 +22,9 @@ namespace Amazon.S3.Transfer ///
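The response type below now reports how many objects a directory download fetched. A usage sketch; DownloadDirectoryWithResponseAsync is a hypothetical call shape, since this patch only shows the internal command populating the response:

    // Hypothetical API surface; only the response population is confirmed by this patch.
    var response = await transferUtility.DownloadDirectoryWithResponseAsync(
        new TransferUtilityDownloadDirectoryRequest
        {
            BucketName = "amzn-s3-demo-bucket",  // hypothetical
            S3Directory = "logs/",               // hypothetical
            LocalDirectory = @"C:\temp\logs"     // hypothetical
        });
    Console.WriteLine($"Downloaded {response.ObjectsDownloaded} objects.");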
public class TransferUtilityDownloadDirectoryResponse { + /// + /// The number of objects that have been downloaded + /// + public long ObjectsDownloaded { get; set; } } } From c0693f83affb20e859773f91eb8a605419d70360 Mon Sep 17 00:00:00 2001 From: Garrett Beatty Date: Wed, 19 Nov 2025 09:51:55 -0500 Subject: [PATCH 21/56] Added UploadWithResponse and UploadWithResponseAsync methods to ITransferUtility interface (#4143) --- .../f8a7b6c5-d4e3-2f1a-0b9c-8d7e6f5a4b3c.json | 11 ++ .../Transfer/_async/ITransferUtility.async.cs | 116 +++++++++++++++ .../Transfer/_async/TransferUtility.async.cs | 137 +----------------- .../_bcl+netstandard/ITransferUtility.sync.cs | 108 ++++++++++++++ .../_bcl+netstandard/TransferUtility.sync.cs | 100 +------------ 5 files changed, 243 insertions(+), 229 deletions(-) create mode 100644 generator/.DevConfigs/f8a7b6c5-d4e3-2f1a-0b9c-8d7e6f5a4b3c.json diff --git a/generator/.DevConfigs/f8a7b6c5-d4e3-2f1a-0b9c-8d7e6f5a4b3c.json b/generator/.DevConfigs/f8a7b6c5-d4e3-2f1a-0b9c-8d7e6f5a4b3c.json new file mode 100644 index 000000000000..bc5e6350ecb8 --- /dev/null +++ b/generator/.DevConfigs/f8a7b6c5-d4e3-2f1a-0b9c-8d7e6f5a4b3c.json @@ -0,0 +1,11 @@ +{ + "services": [ + { + "serviceName": "S3", + "type": "patch", + "changeLogMessages": [ + "Added UploadWithResponse and UploadWithResponseAsync methods to ITransferUtility interface" + ] + } + ] +} diff --git a/sdk/src/Services/S3/Custom/Transfer/_async/ITransferUtility.async.cs b/sdk/src/Services/S3/Custom/Transfer/_async/ITransferUtility.async.cs index d67a94b00856..938bebf7653e 100644 --- a/sdk/src/Services/S3/Custom/Transfer/_async/ITransferUtility.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/_async/ITransferUtility.async.cs @@ -168,6 +168,122 @@ public partial interface ITransferUtility : IDisposable /// /// The task object representing the asynchronous operation. Task UploadAsync(TransferUtilityUploadRequest request, CancellationToken cancellationToken = default(CancellationToken)); + + /// + /// Uploads the specified file and returns response metadata. + /// The object key is derived from the file's name. + /// Multiple threads are used to read the file and perform multiple uploads in parallel. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. + /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploadsAsync() to abort the incomplete multipart uploads. + /// + /// + /// + /// The file path of the file to upload. + /// + /// + /// The target Amazon S3 bucket, that is, the name of the bucket to upload the file to. + /// + /// + /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. + /// + /// The task object representing the asynchronous operation with upload response metadata. + Task UploadWithResponseAsync(string filePath, string bucketName, CancellationToken cancellationToken = default(CancellationToken)); + + /// + /// Uploads the specified file and returns response metadata. 
+ /// Multiple threads are used to read the file and perform multiple uploads in parallel. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. + /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploadsAsync() to abort the incomplete multipart uploads. + /// + /// + /// + /// The file path of the file to upload. + /// + /// + /// The target Amazon S3 bucket, that is, the name of the bucket to upload the file to. + /// + /// + /// The key under which the Amazon S3 object is stored. + /// + /// + /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. + /// + /// The task object representing the asynchronous operation with upload response metadata. + Task UploadWithResponseAsync(string filePath, string bucketName, string key, CancellationToken cancellationToken = default(CancellationToken)); + + /// + /// Uploads the contents of the specified stream and returns response metadata. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. + /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploadsAsync() to abort the incomplete multipart uploads. + /// + /// + /// + /// The stream to read to obtain the content to upload. + /// + /// + /// The target Amazon S3 bucket, that is, the name of the bucket to upload the stream to. + /// + /// + /// The key under which the Amazon S3 object is stored. + /// + /// + /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. + /// + /// The task object representing the asynchronous operation with upload response metadata. + Task UploadWithResponseAsync(Stream stream, string bucketName, string key, CancellationToken cancellationToken = default(CancellationToken)); + + /// + /// Uploads the file or stream specified by the request and returns response metadata. + /// To track the progress of the upload, + /// add an event listener to the request's UploadProgressEvent. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. 
+ /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploadsAsync() to abort the incomplete multipart uploads. + /// + /// + /// + /// Contains all the parameters required to upload to Amazon S3. + /// + /// + /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. + /// + /// The task object representing the asynchronous operation with upload response metadata. + Task UploadWithResponseAsync(TransferUtilityUploadRequest request, CancellationToken cancellationToken = default(CancellationToken)); #endregion #region AbortMultipartUploads diff --git a/sdk/src/Services/S3/Custom/Transfer/_async/TransferUtility.async.cs b/sdk/src/Services/S3/Custom/Transfer/_async/TransferUtility.async.cs index 92307954b039..e1c52c2a6e68 100644 --- a/sdk/src/Services/S3/Custom/Transfer/_async/TransferUtility.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/_async/TransferUtility.async.cs @@ -218,157 +218,28 @@ public partial class TransferUtility : ITransferUtility } } - /// - /// Uploads the specified file and returns response metadata. - /// The object key is derived from the file's name. - /// Multiple threads are used to read the file and perform multiple uploads in parallel. - /// For large uploads, the file will be divided and uploaded in parts using - /// Amazon S3's multipart API. The parts will be reassembled as one object in - /// Amazon S3. - /// - /// - /// - /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. - /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. - /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able - /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, - /// you should manually invoke TransferUtility.AbortMultipartUploadsAsync() to abort the incomplete multipart uploads. - /// - /// - /// For nonseekable streams or streams with an unknown length, TransferUtility will use multipart upload and buffer up to a part size in memory - /// until the final part is reached and complete the upload. The buffer for the multipart upload is controlled by S3Constants.MinPartSize - /// and the default value is 5 megabytes. You can also adjust the read buffer size(i.e.how many bytes to read before writing to the part buffer) - /// via the BufferSize property on the ClientConfig.The default value for this is 8192 bytes. - /// - /// - /// - /// The file path of the file to upload. - /// - /// - /// The target Amazon S3 bucket, that is, the name of the bucket to upload the file to. - /// - /// - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// - /// The task object representing the asynchronous operation with upload response metadata. + /// public async Task UploadWithResponseAsync(string filePath, string bucketName, CancellationToken cancellationToken = default(CancellationToken)) { var request = ConstructUploadRequest(filePath, bucketName); return await UploadWithResponseAsync(request, cancellationToken).ConfigureAwait(false); } - /// - /// Uploads the specified file and returns response metadata. 
- /// Multiple threads are used to read the file and perform multiple uploads in parallel. - /// For large uploads, the file will be divided and uploaded in parts using - /// Amazon S3's multipart API. The parts will be reassembled as one object in - /// Amazon S3. - /// - /// - /// - /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. - /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. - /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able - /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, - /// you should manually invoke TransferUtility.AbortMultipartUploadsAsync() to abort the incomplete multipart uploads. - /// - /// - /// For nonseekable streams or streams with an unknown length, TransferUtility will use multipart upload and buffer up to a part size in memory - /// until the final part is reached and complete the upload. The buffer for the multipart upload is controlled by S3Constants.MinPartSize - /// and the default value is 5 megabytes. You can also adjust the read buffer size(i.e.how many bytes to read before writing to the part buffer) - /// via the BufferSize property on the ClientConfig.The default value for this is 8192 bytes. - /// - /// - /// - /// The file path of the file to upload. - /// - /// - /// The target Amazon S3 bucket, that is, the name of the bucket to upload the file to. - /// - /// - /// The key under which the Amazon S3 object is stored. - /// - /// - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// - /// The task object representing the asynchronous operation with upload response metadata. + /// public async Task UploadWithResponseAsync(string filePath, string bucketName, string key, CancellationToken cancellationToken = default(CancellationToken)) { var request = ConstructUploadRequest(filePath, bucketName, key); return await UploadWithResponseAsync(request, cancellationToken).ConfigureAwait(false); } - /// - /// Uploads the contents of the specified stream and returns response metadata. - /// For large uploads, the file will be divided and uploaded in parts using - /// Amazon S3's multipart API. The parts will be reassembled as one object in - /// Amazon S3. - /// - /// - /// - /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. - /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. - /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able - /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, - /// you should manually invoke TransferUtility.AbortMultipartUploadsAsync() to abort the incomplete multipart uploads. - /// - /// - /// For nonseekable streams or streams with an unknown length, TransferUtility will use multipart upload and buffer up to a part size in memory - /// until the final part is reached and complete the upload. The buffer for the multipart upload is controlled by S3Constants.MinPartSize - /// and the default value is 5 megabytes. You can also adjust the read buffer size(i.e.how many bytes to read before writing to the part buffer) - /// via the BufferSize property on the ClientConfig.The default value for this is 8192 bytes. 
- /// - /// - /// - /// The stream to read to obtain the content to upload. - /// - /// - /// The target Amazon S3 bucket, that is, the name of the bucket to upload the stream to. - /// - /// - /// The key under which the Amazon S3 object is stored. - /// - /// - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// - /// The task object representing the asynchronous operation with upload response metadata. + /// public async Task UploadWithResponseAsync(Stream stream, string bucketName, string key, CancellationToken cancellationToken = default(CancellationToken)) { var request = ConstructUploadRequest(stream, bucketName, key); return await UploadWithResponseAsync(request, cancellationToken).ConfigureAwait(false); } - /// - /// Uploads the file or stream specified by the request and returns response metadata. - /// To track the progress of the upload, - /// add an event listener to the request's UploadProgressEvent. - /// For large uploads, the file will be divided and uploaded in parts using - /// Amazon S3's multipart API. The parts will be reassembled as one object in - /// Amazon S3. - /// - /// - /// - /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. - /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. - /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able - /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, - /// you should manually invoke TransferUtility.AbortMultipartUploadsAsync() to abort the incomplete multipart uploads. - /// - /// - /// For nonseekable streams or streams with an unknown length, TransferUtility will use multipart upload and buffer up to a part size in memory - /// until the final part is reached and complete the upload. The part size buffer for the multipart upload is controlled by the partSize - /// specified on the TransferUtilityUploadRequest, and if none is specified it defaults to S3Constants.MinPartSize (5 megabytes). - /// You can also adjust the read buffer size (i.e. how many bytes to read before adding it to the - /// part buffer) via the BufferSize property on the ClientConfig. The default value for this is 8192 bytes. - /// - /// - /// - /// Contains all the parameters required to upload to Amazon S3. - /// - /// - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// - /// The task object representing the asynchronous operation with upload response metadata. 
+ /// public async Task UploadWithResponseAsync(TransferUtilityUploadRequest request, CancellationToken cancellationToken = default(CancellationToken)) { using(CreateSpan(nameof(UploadWithResponseAsync), null, Amazon.Runtime.Telemetry.Tracing.SpanKind.CLIENT)) diff --git a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.sync.cs b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.sync.cs index a492f922a7d2..8444104739e2 100644 --- a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.sync.cs +++ b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.sync.cs @@ -318,5 +318,113 @@ public partial interface ITransferUtility void AbortMultipartUploads(string bucketName, DateTime initiatedDate); #endregion + + #region UploadWithResponse + + /// + /// Uploads the specified file and returns response metadata. + /// The object key is derived from the file's name. + /// Multiple threads are used to read the file and perform multiple uploads in parallel. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. + /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. + /// + /// + /// + /// The file path of the file to upload. + /// + /// + /// The target Amazon S3 bucket, that is, the name of the bucket to upload the file to. + /// + /// The upload response metadata. + TransferUtilityUploadResponse UploadWithResponse(string filePath, string bucketName); + + /// + /// Uploads the specified file and returns response metadata. + /// Multiple threads are used to read the file and perform multiple uploads in parallel. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. + /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. + /// + /// + /// + /// The file path of the file to upload. + /// + /// + /// The target Amazon S3 bucket, that is, the name of the bucket to upload the file to. + /// + /// + /// The key under which the Amazon S3 object is stored. + /// + /// The upload response metadata. + TransferUtilityUploadResponse UploadWithResponse(string filePath, string bucketName, string key); + + /// + /// Uploads the contents of the specified stream and returns response metadata. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. 
The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. + /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. + /// + /// + /// + /// The stream to read to obtain the content to upload. + /// + /// + /// The target Amazon S3 bucket, that is, the name of the bucket to upload the stream to. + /// + /// + /// The key under which the Amazon S3 object is stored. + /// + /// The upload response metadata. + TransferUtilityUploadResponse UploadWithResponse(Stream stream, string bucketName, string key); + + /// + /// Uploads the file or stream specified by the request and returns response metadata. + /// To track the progress of the upload, + /// add an event listener to the request's UploadProgressEvent. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. + /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. + /// + /// + /// + /// Contains all the parameters required to upload to Amazon S3. + /// + /// The upload response metadata. + TransferUtilityUploadResponse UploadWithResponse(TransferUtilityUploadRequest request); + + #endregion } } diff --git a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.sync.cs b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.sync.cs index c4d99745e3da..9a627d30c282 100644 --- a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.sync.cs +++ b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.sync.cs @@ -285,30 +285,7 @@ public void Upload(TransferUtilityUploadRequest request) } } - /// - /// Uploads the specified file and returns response metadata. - /// The object key is derived from the file's name. - /// Multiple threads are used to read the file and perform multiple uploads in parallel. - /// For large uploads, the file will be divided and uploaded in parts using - /// Amazon S3's multipart API. The parts will be reassembled as one object in - /// Amazon S3. - /// - /// - /// - /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. - /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. - /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able - /// to abort the multipart upload. 
In this case, in order to stop getting charged for the storage of uploaded parts, - /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. - /// - /// - /// - /// The file path of the file to upload. - /// - /// - /// The target Amazon S3 bucket, that is, the name of the bucket to upload the file to. - /// - /// The upload response metadata. + /// public TransferUtilityUploadResponse UploadWithResponse(string filePath, string bucketName) { try @@ -322,32 +299,7 @@ public TransferUtilityUploadResponse UploadWithResponse(string filePath, string } } - /// - /// Uploads the specified file and returns response metadata. - /// Multiple threads are used to read the file and perform multiple uploads in parallel. - /// For large uploads, the file will be divided and uploaded in parts using - /// Amazon S3's multipart API. The parts will be reassembled as one object in - /// Amazon S3. - /// - /// - /// - /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. - /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. - /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able - /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, - /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. - /// - /// - /// - /// The file path of the file to upload. - /// - /// - /// The target Amazon S3 bucket, that is, the name of the bucket to upload the file to. - /// - /// - /// The key under which the Amazon S3 object is stored. - /// - /// The upload response metadata. + /// public TransferUtilityUploadResponse UploadWithResponse(string filePath, string bucketName, string key) { try @@ -361,31 +313,7 @@ public TransferUtilityUploadResponse UploadWithResponse(string filePath, string } } - /// - /// Uploads the contents of the specified stream and returns response metadata. - /// For large uploads, the file will be divided and uploaded in parts using - /// Amazon S3's multipart API. The parts will be reassembled as one object in - /// Amazon S3. - /// - /// - /// - /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. - /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. - /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able - /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, - /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. - /// - /// - /// - /// The stream to read to obtain the content to upload. - /// - /// - /// The target Amazon S3 bucket, that is, the name of the bucket to upload the stream to. - /// - /// - /// The key under which the Amazon S3 object is stored. - /// - /// The upload response metadata. + /// public TransferUtilityUploadResponse UploadWithResponse(Stream stream, string bucketName, string key) { try @@ -399,27 +327,7 @@ public TransferUtilityUploadResponse UploadWithResponse(Stream stream, string bu } } - /// - /// Uploads the file or stream specified by the request and returns response metadata. 
- /// To track the progress of the upload, - /// add an event listener to the request's UploadProgressEvent. - /// For large uploads, the file will be divided and uploaded in parts using - /// Amazon S3's multipart API. The parts will be reassembled as one object in - /// Amazon S3. - /// - /// - /// - /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. - /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. - /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able - /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, - /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. - /// - /// - /// - /// Contains all the parameters required to upload to Amazon S3. - /// - /// The upload response metadata. + /// public TransferUtilityUploadResponse UploadWithResponse(TransferUtilityUploadRequest request) { try From 42e25ce3844d59125b87d9b0405a19807fb1ad91 Mon Sep 17 00:00:00 2001 From: Garrett Beatty Date: Wed, 19 Nov 2025 12:54:47 -0500 Subject: [PATCH 22/56] Update TransferUtilityConfig and BaseDownloadRequest to add multi part download config options (#4120) --- .../19ed68ce-9f46-4e1e-a0ff-45a2b3641946.json | 13 ++++ .../S3/Custom/Transfer/BaseDownloadRequest.cs | 60 ++++++++++++++++++- .../Custom/Transfer/TransferUtilityConfig.cs | 17 ++++++ 3 files changed, 89 insertions(+), 1 deletion(-) create mode 100644 generator/.DevConfigs/19ed68ce-9f46-4e1e-a0ff-45a2b3641946.json diff --git a/generator/.DevConfigs/19ed68ce-9f46-4e1e-a0ff-45a2b3641946.json b/generator/.DevConfigs/19ed68ce-9f46-4e1e-a0ff-45a2b3641946.json new file mode 100644 index 000000000000..08f9ff96ce53 --- /dev/null +++ b/generator/.DevConfigs/19ed68ce-9f46-4e1e-a0ff-45a2b3641946.json @@ -0,0 +1,13 @@ +{ + "services": [ + { + "serviceName": "S3", + "type": "patch", + "changeLogMessages": [ + "Added MaxInMemoryParts property to TransferUtilityConfig for controlling memory usage during multipart downloads", + "Added PartSize property to BaseDownloadRequest for configuring multipart download part sizes", + "Added MultipartDownloadType enum and property to BaseDownloadRequest for selecting download strategy" + ] + } + ] +} diff --git a/sdk/src/Services/S3/Custom/Transfer/BaseDownloadRequest.cs b/sdk/src/Services/S3/Custom/Transfer/BaseDownloadRequest.cs index 84bc08c29225..fb85827cfafd 100644 --- a/sdk/src/Services/S3/Custom/Transfer/BaseDownloadRequest.cs +++ b/sdk/src/Services/S3/Custom/Transfer/BaseDownloadRequest.cs @@ -28,6 +28,22 @@ namespace Amazon.S3.Transfer { + /// + /// Specifies the strategy for multipart downloads + /// + public enum MultipartDownloadType + { + /// + /// Use part-based downloads with original upload part boundaries + /// + PART, + + /// + /// Use range-based downloads with configurable part sizes + /// + RANGE + } + /// /// The base class for requests that return Amazon S3 objects. /// @@ -50,6 +66,8 @@ public abstract class BaseDownloadRequest private string ifMatch; private string ifNoneMatch; private ResponseHeaderOverrides responseHeaders; + private long? partSize; + private MultipartDownloadType multipartDownloadType = MultipartDownloadType.PART; /// /// Gets or sets the name of the bucket. 
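Together with the PartSize and MultipartDownloadType properties added in the next hunk, and the MaxInMemoryParts limit added to TransferUtilityConfig later in this patch, a range-based download could be configured as follows. A sketch only; the values are illustrative and s3Client is assumed to be an existing IAmazonS3 instance:

    var config = new TransferUtilityConfig
    {
        MaxInMemoryParts = 64  // with the 16 MB parts below, caps buffering near 1 GB
    };
    var utility = new TransferUtility(s3Client, config);

    var downloadRequest = new TransferUtilityDownloadRequest
    {
        BucketName = "amzn-s3-demo-bucket",      // hypothetical
        Key = "large-object.bin",                // hypothetical
        FilePath = @"C:\temp\large-object.bin",  // hypothetical
        MultipartDownloadType = MultipartDownloadType.RANGE,
        PartSize = 16 * 1024 * 1024              // 16 MB ranged GETs
    };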
@@ -330,5 +348,45 @@ public ResponseHeaderOverrides ResponseHeaderOverrides this.responseHeaders = value; } } + + /// + /// Gets or sets the part size of the download in bytes. + /// The downloaded file will be divided into + /// parts the size specified and + /// downloaded from Amazon S3 individually. + /// This is used when MultipartDownloadType is set to RANGE. + /// + /// + /// The part size of the download. + /// + public long PartSize + { + get { return this.partSize.GetValueOrDefault(); } + set { this.partSize = value; } + } + + /// + /// Checks if PartSize property is set. + /// + /// true if PartSize property is set. + internal bool IsSetPartSize() + { + return this.partSize.HasValue; + } + + /// + /// Gets or sets the type of multipart download to use. + /// PART: Uses part GET with original part sizes from upload (ignores PartSize) + /// RANGE: Uses ranged GET with PartSize to determine ranges + /// Default is PART + /// + /// + /// The multipart download type. + /// + public MultipartDownloadType MultipartDownloadType + { + get { return this.multipartDownloadType; } + set { this.multipartDownloadType = value; } + } } -} \ No newline at end of file +} diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityConfig.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityConfig.cs index c652bd4a5a36..993d671263fe 100644 --- a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityConfig.cs +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityConfig.cs @@ -42,6 +42,7 @@ public partial class TransferUtilityConfig { long _minSizeBeforePartUpload = 16 * (long)Math.Pow(2, 20); int _concurrentServiceRequests; + int _maxInMemoryParts = 1024; // When combined with the default part size of 8MB, we get 8GB of memory being utilized as the default. /// /// Default constructor. @@ -81,5 +82,21 @@ public int ConcurrentServiceRequests this._concurrentServiceRequests = value; } } + + /// + /// Gets or sets the maximum number of parts to buffer in memory during multipart downloads. + /// The default value is 1024. + /// + public int MaxInMemoryParts + { + get { return this._maxInMemoryParts; } + set + { + if (value < 1) + value = 1; + + this._maxInMemoryParts = value; + } + } } } From da1fc1e2e50ba901c795536279b4cd6061f0b961 Mon Sep 17 00:00:00 2001 From: Garrett Beatty Date: Wed, 26 Nov 2025 15:26:27 -0500 Subject: [PATCH 23/56] Fix content language initialization (#4168) --- sdk/src/Services/S3/Custom/Model/GetObjectResponse.cs | 11 ++--------- .../GetObjectResponseUnmarshaller.cs | 1 - 2 files changed, 2 insertions(+), 10 deletions(-) diff --git a/sdk/src/Services/S3/Custom/Model/GetObjectResponse.cs b/sdk/src/Services/S3/Custom/Model/GetObjectResponse.cs index 44c3eaddc6fe..bf06d655ed56 100644 --- a/sdk/src/Services/S3/Custom/Model/GetObjectResponse.cs +++ b/sdk/src/Services/S3/Custom/Model/GetObjectResponse.cs @@ -69,7 +69,6 @@ public partial class GetObjectResponse : StreamResponse private string _checksumSHA1; private string _checksumSHA256; private ChecksumType _checksumType; - private string _contentLanguage; /// /// The date and time at which the object is no longer cacheable. 
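Referring back to the MaxInMemoryParts setting added to TransferUtilityConfig in the previous patch: the buffering ceiling for multipart downloads is roughly MaxInMemoryParts multiplied by the part size, which is where the 1024 x 8 MB = 8 GB figure in that code comment comes from. A hedged caller-side sketch for bounding the budget (the TransferUtility constructor overload and the s3Client variable are assumptions; only MaxInMemoryParts and ConcurrentServiceRequests appear in this changeset):

    // Hypothetical sketch: cap buffered-download memory at ~512 MB
    // (64 in-memory parts x 8 MB default part size).
    var config = new TransferUtilityConfig
    {
        ConcurrentServiceRequests = 10,
        MaxInMemoryParts = 64
    };
    var transferUtility = new TransferUtility(s3Client, config); // s3Client: an existing IAmazonS3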
@@ -175,14 +174,8 @@ internal bool IsSetContentRange() /// public string ContentLanguage { - get { return this._contentLanguage; } - set { this._contentLanguage = value; } - } - - // Check to see if ContentLanguage property is set - internal bool IsSetContentLanguage() - { - return this._contentLanguage != null; + get { return this.Headers.ContentLanguage; } + set { this.Headers.ContentLanguage = value; } } /// diff --git a/sdk/src/Services/S3/Custom/Model/Internal/MarshallTransformations/GetObjectResponseUnmarshaller.cs b/sdk/src/Services/S3/Custom/Model/Internal/MarshallTransformations/GetObjectResponseUnmarshaller.cs index 171aebac022e..2e0b4a0197b9 100644 --- a/sdk/src/Services/S3/Custom/Model/Internal/MarshallTransformations/GetObjectResponseUnmarshaller.cs +++ b/sdk/src/Services/S3/Custom/Model/Internal/MarshallTransformations/GetObjectResponseUnmarshaller.cs @@ -86,7 +86,6 @@ private static void UnmarshallResult(XmlUnmarshallerContext context,GetObjectRes if (responseData.IsHeaderPresent("Content-Encoding")) response.Headers.ContentEncoding = S3Transforms.ToString(responseData.GetHeaderValue("Content-Encoding")); if (responseData.IsHeaderPresent("Content-Language")) - response.ContentLanguage = S3Transforms.ToString(responseData.GetHeaderValue("Content-Language")); response.Headers.ContentLanguage = S3Transforms.ToString(responseData.GetHeaderValue("Content-Language")); if (responseData.IsHeaderPresent("Content-Length")) response.Headers.ContentLength = long.Parse(responseData.GetHeaderValue("Content-Length"), CultureInfo.InvariantCulture); From b35969659ea65fbfd43949ca189c5c28f6fcac75 Mon Sep 17 00:00:00 2001 From: Garrett Beatty Date: Fri, 28 Nov 2025 10:01:49 -0500 Subject: [PATCH 24/56] Multi Part Download + OpenStreamWithResponseAsync (#4130) --- .../9d07dc1e-d82d-4f94-8700-c7b57f872042.json | 11 + .../Generators/SourceFiles/AssemblyInfo.cs | 7 +- .../Generators/SourceFiles/AssemblyInfo.tt | 1 + .../Custom/Transfer/Internal/BaseCommand.cs | 40 +- .../Transfer/Internal/BufferedDataSource.cs | 156 ++ .../Internal/BufferedDownloadConfiguration.cs | 66 + .../Internal/BufferedMultipartStream.cs | 311 ++++ .../Internal/BufferedPartDataHandler.cs | 192 ++ .../Internal/DownloadManagerConfiguration.cs | 64 + .../Transfer/Internal/IDownloadManager.cs | 83 + .../Transfer/Internal/IPartBufferManager.cs | 85 + .../Transfer/Internal/IPartDataHandler.cs | 70 + .../Transfer/Internal/IPartDataSource.cs | 53 + .../Internal/MultipartDownloadManager.cs | 594 ++++++ .../Internal/OpenStreamWithResponseCommand.cs | 48 + .../Transfer/Internal/PartBufferManager.cs | 613 +++++++ .../Custom/Transfer/Internal/RequestMapper.cs | 92 + .../Transfer/Internal/StreamPartBuffer.cs | 175 ++ .../Custom/Transfer/Internal/TaskHelpers.cs | 87 + .../AbortMultipartUploadsCommand.async.cs | 2 +- .../Internal/_async/BaseCommand.async.cs | 52 - .../_async/MultipartUploadCommand.async.cs | 2 +- .../_async/OpenStreamCommand.async.cs | 1 - .../OpenStreamWithResponseCommand.async.cs | 85 + .../DownloadDirectoryCommand.cs | 2 +- .../UploadDirectoryCommand.cs | 2 +- .../Transfer/_async/ITransferUtility.async.cs | 141 ++ .../Transfer/_async/TransferUtility.async.cs | 215 +-- .../_bcl+netstandard/ITransferUtility.sync.cs | 139 ++ .../_bcl+netstandard/TransferUtility.sync.cs | 290 +-- .../Services/S3/Properties/AssemblyInfo.cs | 1 + .../TransferUtilityOpenStreamTests.cs | 474 +++++ .../Custom/BufferedDataSourceTests.cs | 487 +++++ .../BufferedDownloadConfigurationTests.cs | 151 ++ .../Custom/BufferedMultipartStreamTests.cs | 1283 
+++++++++++++ .../Custom/BufferedPartDataHandlerTests.cs | 585 ++++++ .../Custom/MultipartDownloadManagerTests.cs | 1623 +++++++++++++++++ .../Custom/MultipartDownloadTestHelpers.cs | 596 ++++++ .../OpenStreamWithResponseCommandTests.cs | 359 ++++ .../Custom/PartBufferManagerTests.cs | 1007 ++++++++++ .../UnitTests/Custom/StreamPartBufferTests.cs | 396 ++++ 41 files changed, 10113 insertions(+), 528 deletions(-) create mode 100644 generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872042.json create mode 100644 sdk/src/Services/S3/Custom/Transfer/Internal/BufferedDataSource.cs create mode 100644 sdk/src/Services/S3/Custom/Transfer/Internal/BufferedDownloadConfiguration.cs create mode 100644 sdk/src/Services/S3/Custom/Transfer/Internal/BufferedMultipartStream.cs create mode 100644 sdk/src/Services/S3/Custom/Transfer/Internal/BufferedPartDataHandler.cs create mode 100644 sdk/src/Services/S3/Custom/Transfer/Internal/DownloadManagerConfiguration.cs create mode 100644 sdk/src/Services/S3/Custom/Transfer/Internal/IDownloadManager.cs create mode 100644 sdk/src/Services/S3/Custom/Transfer/Internal/IPartBufferManager.cs create mode 100644 sdk/src/Services/S3/Custom/Transfer/Internal/IPartDataHandler.cs create mode 100644 sdk/src/Services/S3/Custom/Transfer/Internal/IPartDataSource.cs create mode 100644 sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs create mode 100644 sdk/src/Services/S3/Custom/Transfer/Internal/OpenStreamWithResponseCommand.cs create mode 100644 sdk/src/Services/S3/Custom/Transfer/Internal/PartBufferManager.cs create mode 100644 sdk/src/Services/S3/Custom/Transfer/Internal/RequestMapper.cs create mode 100644 sdk/src/Services/S3/Custom/Transfer/Internal/StreamPartBuffer.cs create mode 100644 sdk/src/Services/S3/Custom/Transfer/Internal/TaskHelpers.cs create mode 100644 sdk/src/Services/S3/Custom/Transfer/Internal/_async/OpenStreamWithResponseCommand.async.cs create mode 100644 sdk/test/Services/S3/IntegrationTests/TransferUtilityOpenStreamTests.cs create mode 100644 sdk/test/Services/S3/UnitTests/Custom/BufferedDataSourceTests.cs create mode 100644 sdk/test/Services/S3/UnitTests/Custom/BufferedDownloadConfigurationTests.cs create mode 100644 sdk/test/Services/S3/UnitTests/Custom/BufferedMultipartStreamTests.cs create mode 100644 sdk/test/Services/S3/UnitTests/Custom/BufferedPartDataHandlerTests.cs create mode 100644 sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs create mode 100644 sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadTestHelpers.cs create mode 100644 sdk/test/Services/S3/UnitTests/Custom/OpenStreamWithResponseCommandTests.cs create mode 100644 sdk/test/Services/S3/UnitTests/Custom/PartBufferManagerTests.cs create mode 100644 sdk/test/Services/S3/UnitTests/Custom/StreamPartBufferTests.cs diff --git a/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872042.json b/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872042.json new file mode 100644 index 000000000000..92a91ce271dd --- /dev/null +++ b/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872042.json @@ -0,0 +1,11 @@ +{ + "services": [ + { + "serviceName": "S3", + "type": "minor", + "changeLogMessages": [ + "Created new OpenStreamWithResponseAsync method on the Amazon.S3.Transfer.TransferUtility class. The new operation supports downloading in parallel parts of the S3 object in the background while reading from the stream for improved performance." 
+ ] + } + ] +} \ No newline at end of file diff --git a/generator/ServiceClientGeneratorLib/Generators/SourceFiles/AssemblyInfo.cs b/generator/ServiceClientGeneratorLib/Generators/SourceFiles/AssemblyInfo.cs index 156c2b897efe..8eec59e0554a 100644 --- a/generator/ServiceClientGeneratorLib/Generators/SourceFiles/AssemblyInfo.cs +++ b/generator/ServiceClientGeneratorLib/Generators/SourceFiles/AssemblyInfo.cs @@ -94,9 +94,10 @@ public override string TransformText() #line hidden this.Write(@"[assembly: InternalsVisibleTo(""AWSSDK.UnitTests.S3.NetFramework, PublicKey=0024000004800000940000000602000000240000525341310004000001000100db5f59f098d27276c7833875a6263a3cc74ab17ba9a9df0b52aedbe7252745db7274d5271fd79c1f08f668ecfa8eaab5626fa76adc811d3c8fc55859b0d09d3bc0a84eecd0ba891f2b8a2fc55141cdcc37c2053d53491e650a479967c3622762977900eddbf1252ed08a2413f00a28f3a0752a81203f03ccb7f684db373518b4"")] [assembly: InternalsVisibleTo(""AWSSDK.UnitTests.NetFramework, PublicKey=0024000004800000940000000602000000240000525341310004000001000100db5f59f098d27276c7833875a6263a3cc74ab17ba9a9df0b52aedbe7252745db7274d5271fd79c1f08f668ecfa8eaab5626fa76adc811d3c8fc55859b0d09d3bc0a84eecd0ba891f2b8a2fc55141cdcc37c2053d53491e650a479967c3622762977900eddbf1252ed08a2413f00a28f3a0752a81203f03ccb7f684db373518b4"")] +[assembly: InternalsVisibleTo(""DynamicProxyGenAssembly2, PublicKey=0024000004800000940000000602000000240000525341310004000001000100c547cac37abd99c8db225ef2f6c8a3602f3b3606cc9891605d02baa56104f4cfc0734aa39b93bf7852f7d9266654753cc297e7d2edfe0bac1cdcf9f717241550e0a7b191195b7667bb4f64bcb8e2121380fd1d9d46ad2d92d2d15605093924cceaf74c4861eff62abf69b9291ed0a340e113be11e6a7d3113e92484cf7045cc7"")] "); - #line 32 "C:\dev\repos\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" + #line 33 "C:\dev\repos\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" } #line default @@ -125,14 +126,14 @@ public override string TransformText() // [assembly: AssemblyVersion(""1.0.*"")] [assembly: AssemblyVersion("""); - #line 55 "C:\dev\repos\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" + #line 56 "C:\dev\repos\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" this.Write(this.ToStringHelper.ToStringWithCulture(this.Config.ServiceVersion)); #line default #line hidden this.Write("\")]\r\n[assembly: AssemblyFileVersion(\""); - #line 56 "C:\dev\repos\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" + #line 57 "C:\dev\repos\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" this.Write(this.ToStringHelper.ToStringWithCulture(this.Config.ServiceFileVersion)); #line default diff --git a/generator/ServiceClientGeneratorLib/Generators/SourceFiles/AssemblyInfo.tt b/generator/ServiceClientGeneratorLib/Generators/SourceFiles/AssemblyInfo.tt index ab2cf5d21a23..31dceb950beb 100644 --- a/generator/ServiceClientGeneratorLib/Generators/SourceFiles/AssemblyInfo.tt +++ b/generator/ServiceClientGeneratorLib/Generators/SourceFiles/AssemblyInfo.tt @@ -29,6 +29,7 @@ using System.Runtime.CompilerServices; <# if (this.Config.AssemblyTitle=="AWSSDK.S3") { #> [assembly: InternalsVisibleTo("AWSSDK.UnitTests.S3.NetFramework, 
PublicKey=0024000004800000940000000602000000240000525341310004000001000100db5f59f098d27276c7833875a6263a3cc74ab17ba9a9df0b52aedbe7252745db7274d5271fd79c1f08f668ecfa8eaab5626fa76adc811d3c8fc55859b0d09d3bc0a84eecd0ba891f2b8a2fc55141cdcc37c2053d53491e650a479967c3622762977900eddbf1252ed08a2413f00a28f3a0752a81203f03ccb7f684db373518b4")] [assembly: InternalsVisibleTo("AWSSDK.UnitTests.NetFramework, PublicKey=0024000004800000940000000602000000240000525341310004000001000100db5f59f098d27276c7833875a6263a3cc74ab17ba9a9df0b52aedbe7252745db7274d5271fd79c1f08f668ecfa8eaab5626fa76adc811d3c8fc55859b0d09d3bc0a84eecd0ba891f2b8a2fc55141cdcc37c2053d53491e650a479967c3622762977900eddbf1252ed08a2413f00a28f3a0752a81203f03ccb7f684db373518b4")] +[assembly: InternalsVisibleTo("DynamicProxyGenAssembly2, PublicKey=0024000004800000940000000602000000240000525341310004000001000100c547cac37abd99c8db225ef2f6c8a3602f3b3606cc9891605d02baa56104f4cfc0734aa39b93bf7852f7d9266654753cc297e7d2edfe0bac1cdcf9f717241550e0a7b191195b7667bb4f64bcb8e2121380fd1d9d46ad2d92d2d15605093924cceaf74c4861eff62abf69b9291ed0a340e113be11e6a7d3113e92484cf7045cc7")] <# } #> [assembly: AssemblyConfiguration("")] [assembly: AssemblyProduct("Amazon Web Services SDK for .NET")] diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/BaseCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/BaseCommand.cs index 71d2685e6fda..da7e39c69e3c 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/BaseCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/BaseCommand.cs @@ -38,43 +38,11 @@ internal abstract partial class BaseCommand where TResponse : class { internal GetObjectRequest ConvertToGetObjectRequest(BaseDownloadRequest request) { - GetObjectRequest getRequest = new GetObjectRequest() - { - BucketName = request.BucketName, - Key = request.Key, - VersionId = request.VersionId - }; - ((Amazon.Runtime.Internal.IAmazonWebServiceRequest)getRequest).AddBeforeRequestHandler(this.RequestEventHandler); - - if (request.IsSetModifiedSinceDate()) - { - getRequest.ModifiedSinceDate = request.ModifiedSinceDate; - } - if (request.IsSetUnmodifiedSinceDate()) - { - getRequest.UnmodifiedSinceDate = request.UnmodifiedSinceDate; - } - - getRequest.ServerSideEncryptionCustomerMethod = request.ServerSideEncryptionCustomerMethod; - getRequest.ServerSideEncryptionCustomerProvidedKey = request.ServerSideEncryptionCustomerProvidedKey; - getRequest.ServerSideEncryptionCustomerProvidedKeyMD5 = request.ServerSideEncryptionCustomerProvidedKeyMD5; - getRequest.ChecksumMode = request.ChecksumMode; - getRequest.RequestPayer = request.RequestPayer; - - if (request.IsSetExpectedBucketOwner()) - { - getRequest.ExpectedBucketOwner = request.ExpectedBucketOwner; - } - if (request.IsSetIfMatch()) - { - getRequest.EtagToMatch = request.IfMatch; - } - if (request.IsSetIfNoneMatch()) - { - getRequest.EtagToNotMatch = request.IfNoneMatch; - } + // Use centralized request mapping + GetObjectRequest getRequest = RequestMapper.MapToGetObjectRequest(request); - getRequest.ResponseHeaderOverrides = request.ResponseHeaderOverrides; + // Add command-specific event handler + ((Amazon.Runtime.Internal.IAmazonWebServiceRequest)getRequest).AddBeforeRequestHandler(this.RequestEventHandler); return getRequest; } diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedDataSource.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedDataSource.cs new file mode 100644 index 000000000000..db6ae3c8d00c --- /dev/null +++ 
b/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedDataSource.cs @@ -0,0 +1,156 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ +using Amazon.Runtime.Internal.Util; +using System; +using System.Diagnostics.CodeAnalysis; +using System.Threading; +using System.Threading.Tasks; + +namespace Amazon.S3.Transfer.Internal +{ + /// + /// ArrayPool-based buffered data source that reads from pre-buffered part data. + /// Manages ArrayPool lifecycle and provides efficient buffer-to-buffer copying. + /// + internal class BufferedDataSource : IPartDataSource + { + private readonly StreamPartBuffer _partBuffer; + private bool _disposed = false; + + #region Logger + + private Logger Logger + { + get + { + return Logger.GetLogger(typeof(TransferUtility)); + } + } + + #endregion + + /// + public int PartNumber => _partBuffer.PartNumber; + + /// + public bool IsComplete => _partBuffer.RemainingBytes == 0; + + /// + /// Initializes a new instance of the class. + /// + /// The containing the buffered part data. + /// Thrown when is null. + public BufferedDataSource(StreamPartBuffer partBuffer) + { + _partBuffer = partBuffer ?? 
throw new ArgumentNullException(nameof(partBuffer)); + + Logger.DebugFormat("BufferedDataSource: Created for part {0} (BufferLength={1}, RemainingBytes={2})", + _partBuffer.PartNumber, _partBuffer.Length, _partBuffer.RemainingBytes); + } + + /// + public Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) + { + ThrowIfDisposed(); + + try + { + if (buffer == null) + throw new ArgumentNullException(nameof(buffer)); + if (offset < 0) + throw new ArgumentOutOfRangeException(nameof(offset), "Offset must be non-negative"); + if (count < 0) + throw new ArgumentOutOfRangeException(nameof(count), "Count must be non-negative"); + if (offset + count > buffer.Length) + throw new ArgumentException("Offset and count exceed buffer bounds"); + + if (_partBuffer.RemainingBytes == 0) + { + Logger.DebugFormat("BufferedDataSource: [Part {0}] Reached end of buffer (RemainingBytes=0)", _partBuffer.PartNumber); + return Task.FromResult(0); // End of part + } + + // Calculate bytes to copy from buffered part + var availableBytes = _partBuffer.RemainingBytes; + var bytesToRead = Math.Min(count, availableBytes); + + Logger.DebugFormat("BufferedDataSource: [Part {0}] Reading {1} bytes (Requested={2}, Available={3}, CurrentPosition={4})", + _partBuffer.PartNumber, bytesToRead, count, availableBytes, _partBuffer.CurrentPosition); + + Buffer.BlockCopy( + _partBuffer.ArrayPoolBuffer, // Source: ArrayPool buffer + _partBuffer.CurrentPosition, // Source offset + buffer, // Destination: user buffer + offset, // Destination offset + bytesToRead // Bytes to copy + ); + + // Update position in the part buffer + _partBuffer.CurrentPosition += bytesToRead; + + Logger.DebugFormat("BufferedDataSource: [Part {0}] Read complete (BytesRead={1}, NewPosition={2}, RemainingBytes={3}, IsComplete={4})", + _partBuffer.PartNumber, bytesToRead, _partBuffer.CurrentPosition, _partBuffer.RemainingBytes, IsComplete); + + return Task.FromResult(bytesToRead); + } + catch (Exception ex) + { + Logger.Error(ex, "BufferedDataSource: [Part {0}] Error during read: {1}", _partBuffer.PartNumber, ex.Message); + + // On any error during read (including validation), mark the buffer as consumed to prevent further reads + _partBuffer.CurrentPosition = _partBuffer.Length; + throw; + } + } + + private void ThrowIfDisposed() + { + if (_disposed) + throw new ObjectDisposedException(nameof(BufferedDataSource)); + } + + /// + [SuppressMessage("Design", "CA1031:Do not catch general exception types", Justification = "Dispose methods should not throw exceptions")] + public void Dispose() + { + if (!_disposed) + { + try + { + Logger.DebugFormat("BufferedDataSource: [Part {0}] Disposing (Returning buffer to ArrayPool)", _partBuffer.PartNumber); + + // Dispose the underlying StreamPartBuffer, which returns ArrayPool buffer to pool + _partBuffer?.Dispose(); + } + catch (Exception ex) + { + Logger.Error(ex, "BufferedDataSource: [Part {0}] Error during disposal: {1}", _partBuffer.PartNumber, ex.Message); + + // Suppressing CA1031: Dispose methods should not throw exceptions + // Continue disposal process silently on any errors + } + + _disposed = true; + } + } + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedDownloadConfiguration.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedDownloadConfiguration.cs new file mode 100644 index 000000000000..3d0f0b2baf1e --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedDownloadConfiguration.cs @@ -0,0 +1,66 @@ 
+/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ +using System; + +namespace Amazon.S3.Transfer.Internal +{ + /// + /// Configuration settings for buffered multipart downloads to streams. + /// Extends base coordinator settings with buffer-specific parameters. + /// + internal class BufferedDownloadConfiguration : DownloadManagerConfiguration + { + /// + /// Maximum parts to keep in memory simultaneously. + /// + public int MaxInMemoryParts { get; set; } + + /// + /// Buffer size for I/O operations. + /// + public int BufferSize { get; set; } + + /// + /// Creates a BufferedDownloadConfiguration with the specified configuration values. + /// + /// Maximum concurrent HTTP requests for downloading parts. + /// Maximum number of parts to keep in memory simultaneously. + /// Buffer size used for optimal I/O operations. + /// Target size for each part in bytes. + /// Thrown when any parameter is less than or equal to 0. + public BufferedDownloadConfiguration( + int concurrentServiceRequests, + int maxInMemoryParts, + int bufferSize, + long targetPartSizeBytes) + : base(concurrentServiceRequests, targetPartSizeBytes) + { + if (maxInMemoryParts <= 0) + throw new ArgumentOutOfRangeException(nameof(maxInMemoryParts), "Must be greater than 0"); + if (bufferSize <= 0) + throw new ArgumentOutOfRangeException(nameof(bufferSize), "Must be greater than 0"); + + MaxInMemoryParts = maxInMemoryParts; + BufferSize = bufferSize; + } + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedMultipartStream.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedMultipartStream.cs new file mode 100644 index 000000000000..0534d3c100a4 --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedMultipartStream.cs @@ -0,0 +1,311 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ +using System; +using System.Diagnostics.CodeAnalysis; +using System.IO; +using System.Threading; +using System.Threading.Tasks; +using Amazon.Runtime; +using Amazon.Runtime.Internal.Util; +using Amazon.S3.Util; + +namespace Amazon.S3.Transfer.Internal +{ + /// + /// Stream implementation for SEP-compliant multipart downloads to streams. + /// Uses modular architecture with dependency injection for improved maintainability and testability. + /// Supports both single-part and multipart downloads with optimal performance for each scenario. + /// + internal class BufferedMultipartStream : Stream + { + private readonly IDownloadManager _downloadCoordinator; + private readonly IPartBufferManager _partBufferManager; + private readonly BufferedDownloadConfiguration _config; + + private bool _initialized = false; + private bool _disposed = false; + private DownloadDiscoveryResult _discoveryResult; + private long _totalBytesRead = 0; + + private Logger Logger + { + get { return Logger.GetLogger(typeof(TransferUtility)); } + } + + /// + /// Gets the containing metadata from the initial GetObject response. + /// Available after completes successfully. + /// + public DownloadDiscoveryResult DiscoveryResult => _discoveryResult; + + /// + /// Creates a new with dependency injection. + /// + /// that coordinates download discovery and orchestration. + /// that manages part buffer lifecycle and synchronization. + /// with settings for the stream. + public BufferedMultipartStream(IDownloadManager downloadCoordinator, IPartBufferManager partBufferManager, BufferedDownloadConfiguration config) + { + _downloadCoordinator = downloadCoordinator ?? throw new ArgumentNullException(nameof(downloadCoordinator)); + _partBufferManager = partBufferManager ?? throw new ArgumentNullException(nameof(partBufferManager)); + _config = config ?? throw new ArgumentNullException(nameof(config)); + } + + /// + /// Factory method to create with default dependencies. + /// + /// client for making requests. + /// with stream request parameters. + /// with transfer utility configuration. + /// Optional for user agent tracking. + /// A new instance. + public static BufferedMultipartStream Create(IAmazonS3 s3Client, TransferUtilityOpenStreamRequest request, TransferUtilityConfig transferConfig, RequestEventHandler requestEventHandler = null) + { + if (s3Client == null) throw new ArgumentNullException(nameof(s3Client)); + if (request == null) throw new ArgumentNullException(nameof(request)); + if (transferConfig == null) throw new ArgumentNullException(nameof(transferConfig)); + + // Determine target part size from request or use 8MB default + long targetPartSize = request.IsSetPartSize() + ? 
request.PartSize + : S3Constants.DefaultPartSize; + + var config = new BufferedDownloadConfiguration( + transferConfig.ConcurrentServiceRequests, + transferConfig.MaxInMemoryParts, + s3Client.Config.BufferSize, + targetPartSize); + + var partBufferManager = new PartBufferManager(config); + var dataHandler = new BufferedPartDataHandler(partBufferManager, config); + var downloadCoordinator = new MultipartDownloadManager(s3Client, request, config, dataHandler, requestEventHandler); + + return new BufferedMultipartStream(downloadCoordinator, partBufferManager, config); + } + + /// + /// Initialize the stream by discovering download strategy and setting up appropriate handlers. + /// + /// Cancellation token for the initialization operation. + public async Task InitializeAsync(CancellationToken cancellationToken) + { + ThrowIfDisposed(); + + if (_initialized) + throw new InvalidOperationException("Stream has already been initialized"); + + Logger.DebugFormat("BufferedMultipartStream: Starting initialization"); + + _discoveryResult = await _downloadCoordinator.DiscoverDownloadStrategyAsync(cancellationToken) + .ConfigureAwait(false); + + Logger.DebugFormat("BufferedMultipartStream: Discovery completed - ObjectSize={0}, TotalParts={1}, IsSinglePart={2}", + _discoveryResult.ObjectSize, + _discoveryResult.TotalParts, + _discoveryResult.IsSinglePart); + + await _downloadCoordinator.StartDownloadsAsync(_discoveryResult, cancellationToken) + .ConfigureAwait(false); + + _initialized = true; + Logger.DebugFormat("BufferedMultipartStream: Initialization completed successfully"); + } + + /// + /// Asynchronously reads a sequence of bytes from the stream and advances the position within the stream by the number of bytes read. + /// + /// The buffer to read data into. + /// The byte offset in at which to begin storing data. + /// The maximum number of bytes to read. + /// A token to cancel the read operation. + /// + /// A task that represents the asynchronous read operation. The value of the task's result contains the total number of bytes read into the buffer. + /// This can be less than the number of bytes requested if that number of bytes are not currently available, or zero if the end of the stream is reached. + /// + /// The stream has been disposed. + /// The stream has not been initialized. Call first. + /// is null. + /// or is negative. + /// The sum of and is greater than the buffer length. + /// + /// This method reads data from the underlying which coordinates sequential reading + /// from buffered multipart download data. The method automatically handles reading across part boundaries to fill + /// the provided buffer when possible, matching standard behavior. + /// + public override async Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) + { + ThrowIfDisposed(); + + if (!_initialized) + throw new InvalidOperationException("Stream must be initialized before reading. 
Call InitializeAsync first."); + + if (buffer == null) + throw new ArgumentNullException(nameof(buffer)); + if (offset < 0) + throw new ArgumentOutOfRangeException(nameof(offset), "Offset must be non-negative"); + if (count < 0) + throw new ArgumentOutOfRangeException(nameof(count), "Count must be non-negative"); + if (offset + count > buffer.Length) + throw new ArgumentException("Offset and count exceed buffer bounds"); + + var currentPosition = Interlocked.Read(ref _totalBytesRead); + Logger.DebugFormat("BufferedMultipartStream: ReadAsync called - Position={0}, RequestedBytes={1}", + currentPosition, count); + + var bytesRead = await _partBufferManager.ReadAsync(buffer, offset, count, cancellationToken) + .ConfigureAwait(false); + + // Track total bytes read for Position property + if (bytesRead > 0) + { + Interlocked.Add(ref _totalBytesRead, bytesRead); + Logger.DebugFormat("BufferedMultipartStream: ReadAsync completed - BytesRead={0}, NewPosition={1}", + bytesRead, currentPosition + bytesRead); + } + else + { + Logger.DebugFormat("BufferedMultipartStream: ReadAsync returned EOF (0 bytes)"); + } + + return bytesRead; + } + + + #region Stream Implementation + + public override bool CanRead => true; + public override bool CanSeek => false; + public override bool CanWrite => false; + + public override long Length + { + get + { + if (!_initialized) + throw new InvalidOperationException("Stream must be initialized before accessing Length"); + return _discoveryResult.ObjectSize; + } + } + + public override long Position + { + get + { + if (!_initialized) + throw new InvalidOperationException("Stream must be initialized before accessing Position"); + return Interlocked.Read(ref _totalBytesRead); + } + set => throw new NotSupportedException("Position setter not supported for read-only streams"); + } + + /// + /// Flushes any buffered data to the underlying stream. This is a no-op for read-only streams. + /// + public override void Flush() { } + + /// + /// Asynchronously flushes any buffered data to the underlying stream. This is a no-op for read-only streams. + /// + /// A token to cancel the operation. + /// A completed task. + public override Task FlushAsync(CancellationToken cancellationToken) => Task.CompletedTask; + + /// + /// Synchronously reads a sequence of bytes from the stream and advances the position within the stream by the number of bytes read. + /// + /// The buffer to read data into. + /// The byte offset in at which to begin storing data. + /// The maximum number of bytes to read. + /// + /// The total number of bytes read into the buffer. This can be less than the number of bytes requested if that number of bytes + /// are not currently available, or zero if the end of the stream is reached. + /// + /// + /// This is a synchronous wrapper around . + /// For better performance, prefer using the asynchronous version when possible. 
+ /// + public override int Read(byte[] buffer, int offset, int count) + { + return ReadAsync(buffer, offset, count).GetAwaiter().GetResult(); + } + + public override long Seek(long offset, SeekOrigin origin) + { + throw new NotSupportedException("Seek not supported for multipart download streams"); + } + + public override void SetLength(long value) + { + throw new NotSupportedException("SetLength not supported for read-only streams"); + } + + public override void Write(byte[] buffer, int offset, int count) + { + throw new NotSupportedException("Write not supported for read-only streams"); + } + + #endregion + + private void ThrowIfDisposed() + { + if (_disposed) + throw new ObjectDisposedException(nameof(BufferedMultipartStream)); + } + + #region Dispose Pattern + + /// + /// Releases the unmanaged resources used by the and optionally releases the managed resources. + /// + /// + /// true to release both managed and unmanaged resources; false to release only unmanaged resources. + /// + /// + /// This method disposes the underlying and , + /// which in turn cleans up any buffered part data and returns ArrayPool buffers to the pool. + /// + [SuppressMessage("Design", "CA1031:Do not catch general exception types", Justification = "Dispose methods should not throw exceptions")] + protected override void Dispose(bool disposing) + { + if (!_disposed && disposing) + { + try + { + // Dispose modular dependencies + _downloadCoordinator?.Dispose(); + _partBufferManager?.Dispose(); + } + catch (Exception) + { + // Suppressing CA1031: Dispose methods should not throw exceptions + // Continue disposal process silently on any errors + } + + _disposed = true; + } + + base.Dispose(disposing); + } + + #endregion + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedPartDataHandler.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedPartDataHandler.cs new file mode 100644 index 000000000000..9ce05f6e71f6 --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedPartDataHandler.cs @@ -0,0 +1,192 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ +using System; +using System.Buffers; +using System.Threading; +using System.Threading.Tasks; +using System.IO; +using Amazon.Runtime.Internal.Util; +using Amazon.S3.Model; + +namespace Amazon.S3.Transfer.Internal +{ + /// + /// Buffers downloaded parts in memory using and . + /// Implements current streaming behavior for multipart downloads. 
+ /// + internal class BufferedPartDataHandler : IPartDataHandler + { + private readonly IPartBufferManager _partBufferManager; + private readonly BufferedDownloadConfiguration _config; + + private Logger Logger + { + get { return Logger.GetLogger(typeof(TransferUtility)); } + } + + /// + /// Initializes a new instance of the class. + /// + /// The for managing part buffers. + /// The with buffering settings. + /// Thrown when any required parameter is null. + public BufferedPartDataHandler( + IPartBufferManager partBufferManager, + BufferedDownloadConfiguration config) + { + _partBufferManager = partBufferManager ?? throw new ArgumentNullException(nameof(partBufferManager)); + _config = config ?? throw new ArgumentNullException(nameof(config)); + } + + /// + public async Task ProcessPartAsync( + int partNumber, + GetObjectResponse response, + CancellationToken cancellationToken) + { + Logger.DebugFormat("BufferedPartDataHandler: [Part {0}] Starting to buffer part from response stream - ContentLength={1}", + partNumber, response.ContentLength); + + // Buffer the part from the response stream into memory + var buffer = await BufferPartFromResponseAsync( + partNumber, + response, + cancellationToken).ConfigureAwait(false); + + Logger.DebugFormat("BufferedPartDataHandler: [Part {0}] Buffered {1} bytes into memory", + partNumber, buffer.Length); + + // Add the buffered part to the buffer manager + await _partBufferManager.AddBufferAsync(buffer, cancellationToken).ConfigureAwait(false); + + Logger.DebugFormat("BufferedPartDataHandler: [Part {0}] Added to buffer manager", + partNumber); + } + + /// + public Task WaitForCapacityAsync(CancellationToken cancellationToken) + { + return _partBufferManager.WaitForBufferSpaceAsync(cancellationToken); + } + + /// + public void ReleaseCapacity() + { + _partBufferManager.ReleaseBufferSpace(); + } + + /// + public void OnDownloadComplete(Exception exception) + { + _partBufferManager.MarkDownloadComplete(exception); + } + + /// + public void Dispose() + { + // _partBufferManager is owned by caller, don't dispose + } + + private async Task BufferPartFromResponseAsync( + int partNumber, + GetObjectResponse response, + CancellationToken cancellationToken) + { + StreamPartBuffer downloadedPart = null; + + try + { + // Use ContentLength to determine exact bytes to read and allocate + long expectedBytes = response.ContentLength; + int initialBufferSize = (int)expectedBytes; + + Logger.DebugFormat("BufferedPartDataHandler: [Part {0}] Allocating buffer of size {1} bytes from ArrayPool", + partNumber, initialBufferSize); + + downloadedPart = StreamPartBuffer.Create(partNumber, initialBufferSize); + + // Get reference to the buffer for writing + var partBuffer = downloadedPart.ArrayPoolBuffer; + + int totalRead = 0; + int chunkCount = 0; + + // Read response stream into buffer in chunks based on ContentLength. 
+ // Example: For a 10MB part with 8KB BufferSize: + // - Loop 1: remainingBytes=10MB, readSize=8KB → reads 8KB at offset 0 + // - Loop 2: remainingBytes=9.992MB, readSize=8KB → reads 8KB at offset 8KB + // - ...continues until totalRead reaches 10MB (1,280 iterations) + while (totalRead < expectedBytes) + { + // Calculate how many bytes we still need to read + int remainingBytes = (int)(expectedBytes - totalRead); + + // Read in chunks up to BufferSize, but never exceed remaining bytes + int readSize = Math.Min(remainingBytes, _config.BufferSize); + + // Read directly into buffer at current position + int bytesRead = await response.ResponseStream.ReadAsync( + partBuffer, + totalRead, + readSize, + cancellationToken).ConfigureAwait(false); + + if (bytesRead == 0) + { + var errorMessage = $"Unexpected end of stream while downloading part {partNumber}. " + + $"Expected {expectedBytes} bytes but only received {totalRead} bytes. " + + $"This indicates a network error or S3 service issue."; + + Logger.Error(null, "BufferedPartDataHandler: [Part {0}] {1}", + partNumber, errorMessage); + + throw new IOException(errorMessage); + } + + totalRead += bytesRead; + chunkCount++; + } + + Logger.DebugFormat("BufferedPartDataHandler: [Part {0}] Read {1} bytes in {2} chunks from response stream", + partNumber, totalRead, chunkCount); + + // Set the length to reflect actual bytes read + downloadedPart.SetLength(totalRead); + + if (totalRead != expectedBytes) + { + Logger.Error(null, "BufferedPartDataHandler: [Part {0}] Size mismatch - Expected {1} bytes, read {2} bytes", + partNumber, expectedBytes, totalRead); + } + + return downloadedPart; + } + catch (Exception ex) + { + Logger.Error(ex, "BufferedPartDataHandler: [Part {0}] Failed to buffer part from response stream", partNumber); + // If something goes wrong, StreamPartBuffer.Dispose() will handle cleanup + downloadedPart?.Dispose(); + throw; + } + } + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadManagerConfiguration.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadManagerConfiguration.cs new file mode 100644 index 000000000000..35161aabba90 --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadManagerConfiguration.cs @@ -0,0 +1,64 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ +using System; + +namespace Amazon.S3.Transfer.Internal +{ + /// + /// Base configuration for multipart download coordination. + /// Contains settings common to all download strategies. + /// + internal class DownloadManagerConfiguration + { + /// + /// Maximum concurrent HTTP requests for downloading parts. + /// + public int ConcurrentServiceRequests { get; set; } + + /// + /// Target part size in bytes. 
+ /// + public long TargetPartSizeBytes { get; set; } + + /// + /// Protected default constructor for derived classes. + /// + protected DownloadManagerConfiguration() { } + + /// + /// Creates a DownloadManagerConfiguration with the specified configuration values. + /// + /// Maximum concurrent HTTP requests for downloading parts. + /// Target size for each part in bytes. + /// Thrown when any parameter is less than or equal to 0. + public DownloadManagerConfiguration(int concurrentServiceRequests, long targetPartSizeBytes) + { + if (concurrentServiceRequests <= 0) + throw new ArgumentOutOfRangeException(nameof(concurrentServiceRequests), "Must be greater than 0"); + if (targetPartSizeBytes <= 0) + throw new ArgumentOutOfRangeException(nameof(targetPartSizeBytes), "Must be greater than 0"); + + ConcurrentServiceRequests = concurrentServiceRequests; + TargetPartSizeBytes = targetPartSizeBytes; + } + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/IDownloadManager.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/IDownloadManager.cs new file mode 100644 index 000000000000..7bf997608cfc --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/IDownloadManager.cs @@ -0,0 +1,83 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ +using System; +using System.Threading; +using System.Threading.Tasks; +using Amazon.S3.Model; + +namespace Amazon.S3.Transfer.Internal +{ + /// + /// Coordinates multipart downloads including discovery, concurrent downloads, and progress reporting. + /// + internal interface IDownloadManager : IDisposable + { + /// + /// Discovers whether the object requires single-part or multipart downloading. + /// + /// A token to cancel the discovery operation. + /// + /// A task containing discovery results including total parts, object size, + /// and initial response data if single-part. + /// + Task DiscoverDownloadStrategyAsync(CancellationToken cancellationToken); + + /// + /// Starts concurrent downloads with HTTP concurrency control and part range calculations. + /// + /// Results from the discovery phase. + /// A token to cancel the download operation. + /// A task that completes when all downloads finish or an error occurs. + Task StartDownloadsAsync(DownloadDiscoveryResult discoveryResult, CancellationToken cancellationToken); + + /// + /// Exception that occurred during downloads, if any. + /// + Exception DownloadException { get; } + } + + /// + /// Download discovery results with metadata for determining download strategy. + /// + internal class DownloadDiscoveryResult + { + /// + /// Total parts needed (1 = single-part, >1 = multipart). + /// + public int TotalParts { get; set; } + + /// + /// Total size of the object in bytes. 
+ /// + public long ObjectSize { get; set; } + + /// + /// GetObjectResponse obtained during download initialization, containing the ResponseStream. Represents the complete object for single-part downloads or the first range/part for multipart downloads. + /// + public GetObjectResponse InitialResponse { get; set; } + + /// + /// Whether this is a single-part download. + /// + public bool IsSinglePart => TotalParts == 1; + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/IPartBufferManager.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/IPartBufferManager.cs new file mode 100644 index 000000000000..004c27092eae --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/IPartBufferManager.cs @@ -0,0 +1,85 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ +using System; +using System.IO; +using System.Threading; +using System.Threading.Tasks; + +namespace Amazon.S3.Transfer.Internal +{ + /// + /// Manages part buffers with ArrayPool lifecycle and concurrency control. + /// + internal interface IPartBufferManager : IDisposable + { + /// + /// Waits for available buffer space before downloading a new part. + /// + /// A token to cancel the wait operation. + /// A task that completes when buffer space becomes available. + Task WaitForBufferSpaceAsync(CancellationToken cancellationToken); + + /// + /// Adds a part data source and signals readers when next expected part arrives. + /// + /// The part data source to add. + void AddDataSource(IPartDataSource dataSource); + + /// + /// Adds a downloaded part buffer and signals readers when next expected part arrives. + /// + /// The downloaded part buffer to add. + /// A token to cancel the operation. + /// A task that completes when the buffer has been added and signaling is complete. + Task AddBufferAsync(StreamPartBuffer buffer, CancellationToken cancellationToken); + + /// + /// Reads data from the buffer manager. Automatically handles sequential part consumption + /// and reads across part boundaries to fill the buffer when possible, matching standard Stream.Read() behavior. + /// + /// The buffer to read data into. + /// The offset in the buffer. + /// The maximum number of bytes to read. + /// A token to cancel the operation. + /// + /// A task whose result contains the number of bytes read. + /// Returns 0 bytes read when end of stream is reached. + /// + Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken); + + /// + /// Releases buffer space to allow new parts to be downloaded. + /// + void ReleaseBufferSpace(); + + /// + /// Next expected part number in the sequence. 
+ /// + int NextExpectedPartNumber { get; } + + /// + /// Marks download completion and handles end-of-stream. + /// + /// Any exception that occurred during downloads, or null if successful. + void MarkDownloadComplete(Exception exception); + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/IPartDataHandler.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/IPartDataHandler.cs new file mode 100644 index 000000000000..63acd951a062 --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/IPartDataHandler.cs @@ -0,0 +1,70 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ +using System; +using System.Threading; +using System.Threading.Tasks; +using Amazon.S3.Model; + +namespace Amazon.S3.Transfer.Internal +{ + /// + /// Strategy interface for processing downloaded part data. + /// Implementations determine WHERE and HOW downloaded parts are stored. + /// Enables separation of download orchestration from data handling (buffering, file writing, etc). + /// + internal interface IPartDataHandler : IDisposable + { + /// + /// Process a downloaded part from the GetObjectResponse. + /// Implementation decides whether to buffer in memory, write to file, etc. + /// + /// 1-based part number + /// GetObjectResponse with ResponseStream to process + /// Cancellation token + /// Task that completes when part processing is done + Task ProcessPartAsync( + int partNumber, + GetObjectResponse response, + CancellationToken cancellationToken); + + /// + /// Wait for available capacity to process another part. + /// Allows implementations to implement backpressure (memory limits, concurrency, etc). + /// + /// Cancellation token + /// Task that completes when capacity becomes available + Task WaitForCapacityAsync(CancellationToken cancellationToken); + + /// + /// Release capacity after a part is processed/consumed. + /// Pairs with WaitForCapacityAsync for backpressure management. + /// + void ReleaseCapacity(); + + /// + /// Called when all downloads complete (successfully or with error). + /// Allows implementations to perform cleanup or commit operations. + /// + /// Exception if download failed, null if successful + void OnDownloadComplete(Exception exception); + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/IPartDataSource.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/IPartDataSource.cs new file mode 100644 index 000000000000..b1ace5ebcbd9 --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/IPartDataSource.cs @@ -0,0 +1,53 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ +using System; +using System.Threading; +using System.Threading.Tasks; + +namespace Amazon.S3.Transfer.Internal +{ + /// + /// Interface for buffered part data sources. + /// + internal interface IPartDataSource : IDisposable + { + /// + /// Reads data from the ArrayPool buffer into the destination buffer. + /// + /// Destination buffer + /// Offset in destination buffer + /// Maximum bytes to read + /// Cancellation token + /// Number of bytes actually read + Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken); + + /// + /// Whether this data source has been fully consumed. + /// + bool IsComplete { get; } + + /// + /// Part number this data source represents. + /// + int PartNumber { get; } + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs new file mode 100644 index 000000000000..a22700560903 --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs @@ -0,0 +1,594 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ +using System; +using System.Buffers; +using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; +using System.IO; +using System.Threading; +using System.Threading.Tasks; +using Amazon.Runtime; +using Amazon.Runtime.Internal.Util; +using Amazon.S3.Model; + +namespace Amazon.S3.Transfer.Internal +{ + /// + /// Coordinates multipart downloads using PART or RANGE strategies per SEP spec. + /// Handles discovery, concurrent downloads. 
+ /// + internal class MultipartDownloadManager : IDownloadManager + { + private readonly IAmazonS3 _s3Client; + private readonly BaseDownloadRequest _request; + private readonly DownloadManagerConfiguration _config; + private readonly IPartDataHandler _dataHandler; + private readonly SemaphoreSlim _httpConcurrencySlots; + private readonly RequestEventHandler _requestEventHandler; + + private Exception _downloadException; + private bool _disposed = false; + private bool _discoveryCompleted = false; + private Task _downloadCompletionTask; + + private string _savedETag; + private int _discoveredPartCount; + + private Logger Logger + { + get { return Logger.GetLogger(typeof(TransferUtility)); } + } + + /// + /// Initializes a new instance of the class. + /// + /// The client for making S3 requests. + /// The containing download parameters. + /// The with download settings. + /// The for processing downloaded parts. + /// Optional for user agent tracking. + /// Thrown when any required parameter is null. + public MultipartDownloadManager(IAmazonS3 s3Client, BaseDownloadRequest request, DownloadManagerConfiguration config, IPartDataHandler dataHandler, RequestEventHandler requestEventHandler = null) + { + _s3Client = s3Client ?? throw new ArgumentNullException(nameof(s3Client)); + _request = request ?? throw new ArgumentNullException(nameof(request)); + _config = config ?? throw new ArgumentNullException(nameof(config)); + _dataHandler = dataHandler ?? throw new ArgumentNullException(nameof(dataHandler)); + _requestEventHandler = requestEventHandler; + + _httpConcurrencySlots = new SemaphoreSlim(_config.ConcurrentServiceRequests); + } + + /// + public Exception DownloadException + { + get + { + return _downloadException; + } + } + + /// + /// Gets a task that completes when all download tasks have finished. + /// Returns a completed task for single-part downloads. + /// For multipart downloads, this task can be awaited to observe exceptions from background downloads. + /// + public Task DownloadCompletionTask => _downloadCompletionTask ?? Task.CompletedTask; + + /// + public async Task DiscoverDownloadStrategyAsync(CancellationToken cancellationToken) + { + ThrowIfDisposed(); + + if (_discoveryCompleted) + throw new InvalidOperationException("Discovery has already been performed"); + + Logger.DebugFormat("MultipartDownloadManager: Starting discovery with strategy={0}", + _request.MultipartDownloadType); + + try + { + // Use strategy-specific discovery based on MultipartDownloadType + var result = _request.MultipartDownloadType == MultipartDownloadType.PART + ? 
await DiscoverUsingPartStrategyAsync(cancellationToken).ConfigureAwait(false) + : await DiscoverUsingRangeStrategyAsync(cancellationToken).ConfigureAwait(false); + + _discoveryCompleted = true; + + Logger.InfoFormat("MultipartDownloadManager: Discovery complete - ObjectSize={0}, TotalParts={1}, Strategy={2}, ETagPresent={3}", + result.ObjectSize, + result.TotalParts, + _request.MultipartDownloadType, + !string.IsNullOrEmpty(_savedETag)); + + return result; + } + catch (Exception ex) + { + _downloadException = ex; + Logger.Error(ex, "MultipartDownloadManager: Discovery failed"); + throw; + } + } + + /// + public async Task StartDownloadsAsync(DownloadDiscoveryResult discoveryResult, CancellationToken cancellationToken) + { + ThrowIfDisposed(); + + if (discoveryResult == null) + throw new ArgumentNullException(nameof(discoveryResult)); + + Logger.DebugFormat("MultipartDownloadManager: Starting downloads - TotalParts={0}, IsSinglePart={1}", + discoveryResult.TotalParts, discoveryResult.IsSinglePart); + + var downloadTasks = new List(); + var internalCts = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken); + + try + { + // Process Part 1 from InitialResponse (applies to both single-part and multipart) + Logger.DebugFormat("MultipartDownloadManager: Buffering Part 1 from discovery response"); + await _dataHandler.ProcessPartAsync(1, discoveryResult.InitialResponse, cancellationToken).ConfigureAwait(false); + + if (discoveryResult.IsSinglePart) + { + // Single-part: Part 1 is the entire object + Logger.DebugFormat("MultipartDownloadManager: Single-part download complete"); + _dataHandler.OnDownloadComplete(null); + return; + } + + // Multipart: Start concurrent downloads for remaining parts (Part 2 onwards) + Logger.InfoFormat("MultipartDownloadManager: Starting concurrent downloads for parts 2-{0}", + discoveryResult.TotalParts); + + for (int partNum = 2; partNum <= discoveryResult.TotalParts; partNum++) + { + var task = CreateDownloadTaskAsync(partNum, discoveryResult.ObjectSize, internalCts.Token); + downloadTasks.Add(task); + } + + // Store count before WhenAllOrFirstException (which modifies the list internally) + var expectedTaskCount = downloadTasks.Count; + + Logger.DebugFormat("MultipartDownloadManager: Starting {0} download tasks in background", expectedTaskCount); + + // Check if already cancelled before creating background task + cancellationToken.ThrowIfCancellationRequested(); + + // Start background task to wait for all downloads to complete + // This allows the method to return immediately so the consumer can start reading + // which prevents deadlock when MaxInMemoryParts is reached before consumer begins reading + _downloadCompletionTask = Task.Run(async () => + { + try + { + Logger.DebugFormat("MultipartDownloadManager: Background task waiting for {0} download tasks", expectedTaskCount); + + // Wait for all downloads to complete (fails fast on first exception) + await TaskHelpers.WhenAllOrFirstExceptionAsync(downloadTasks, cancellationToken).ConfigureAwait(false); + + Logger.DebugFormat("MultipartDownloadManager: All download tasks completed successfully"); + + // SEP Part GET Step 6 / Ranged GET Step 8: + // "validate that the total number of part GET requests sent matches with the expected PartsCount" + // Note: This should always be true if we reach this point, since WhenAllOrFirstException + // ensures all tasks completed successfully (or threw on first failure). + // The check serves as a defensive assertion for SEP compliance. 
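+                    // Worked example (hypothetical numbers): for a 7-part object, part 1 is
+                    // buffered during discovery and the loop above schedules parts 2-7, so
+                    // expectedTaskCount = 6 and the check below verifies 6 + 1 == TotalParts.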
+ // Note: expectedTaskCount + 1 accounts for Part 1 being buffered during discovery + if (expectedTaskCount + 1 != discoveryResult.TotalParts) + { + throw new InvalidOperationException( + $"Request count mismatch. Expected {discoveryResult.TotalParts} parts, " + + $"but sent {expectedTaskCount + 1} requests"); + } + + // Mark successful completion + Logger.InfoFormat("MultipartDownloadManager: Download completed successfully - TotalParts={0}", + discoveryResult.TotalParts); + _dataHandler.OnDownloadComplete(null); + } + #pragma warning disable CA1031 // Do not catch general exception types + + catch (Exception ex) + { + _downloadException = ex; + Logger.Error(ex, "MultipartDownloadManager: Background download task failed"); + _dataHandler.OnDownloadComplete(ex); + throw; + } + #pragma warning restore CA1031 // Do not catch general exception types + }, cancellationToken); + + // Return immediately to allow consumer to start reading + // This prevents deadlock when buffer fills up before consumer begins reading + Logger.DebugFormat("MultipartDownloadManager: Returning to allow consumer to start reading"); + } + catch (Exception ex) + { + _downloadException = ex; + Logger.Error(ex, "MultipartDownloadManager: Download failed"); + + _dataHandler.OnDownloadComplete(ex); + throw; + } + finally + { + internalCts.Dispose(); + } + } + + + + private async Task CreateDownloadTaskAsync(int partNumber, long objectSize, CancellationToken cancellationToken) + { + Logger.DebugFormat("MultipartDownloadManager: [Part {0}] Waiting for buffer space", partNumber); + + // Wait for capacity before starting download + await _dataHandler.WaitForCapacityAsync(cancellationToken).ConfigureAwait(false); + + Logger.DebugFormat("MultipartDownloadManager: [Part {0}] Buffer space acquired", partNumber); + + GetObjectResponse response = null; + + try + { + Logger.DebugFormat("MultipartDownloadManager: [Part {0}] Waiting for HTTP concurrency slot (Available: {1}/{2})", + partNumber, _httpConcurrencySlots.CurrentCount, _config.ConcurrentServiceRequests); + + // Limit HTTP concurrency + await _httpConcurrencySlots.WaitAsync(cancellationToken).ConfigureAwait(false); + + Logger.DebugFormat("MultipartDownloadManager: [Part {0}] HTTP concurrency slot acquired", partNumber); + + try + { + // Create strategy-specific request + GetObjectRequest getObjectRequest; + + if (_request.MultipartDownloadType == MultipartDownloadType.PART) + { + // PART strategy: Use part number from original upload + getObjectRequest = CreateGetObjectRequest(); + getObjectRequest.PartNumber = partNumber; + + // SEP Part GET Step 4: "The S3 Transfer Manager MUST also set IfMatch member + // for each request to the Etag value saved from Step 3" + getObjectRequest.EtagToMatch = _savedETag; + + Logger.DebugFormat("MultipartDownloadManager: [Part {0}] Sending GetObject request with PartNumber={1}, IfMatchPresent={2}", + partNumber, partNumber, !string.IsNullOrEmpty(_savedETag)); + } + else + { + // RANGE strategy: Use calculated byte range + var (startByte, endByte) = CalculatePartRange(partNumber, objectSize); + + getObjectRequest = CreateGetObjectRequest(); + getObjectRequest.ByteRange = new ByteRange(startByte, endByte); + + // SEP Ranged GET Step 6: "The S3 Transfer Manager MUST also set IfMatch member + // for each request to the value saved from Step 5" + getObjectRequest.EtagToMatch = _savedETag; + + Logger.DebugFormat("MultipartDownloadManager: [Part {0}] Sending GetObject request with ByteRange={1}-{2}, IfMatchPresent={3}", + partNumber, startByte, 
endByte, !string.IsNullOrEmpty(_savedETag)); + } + + response = await _s3Client.GetObjectAsync(getObjectRequest, cancellationToken).ConfigureAwait(false); + + Logger.DebugFormat("MultipartDownloadManager: [Part {0}] GetObject response received - ContentLength={1}", + partNumber, response.ContentLength); + + // SEP Part GET Step 5 / Ranged GET Step 7: Validate ContentRange matches request + ValidateContentRange(response, partNumber, objectSize); + + Logger.DebugFormat("MultipartDownloadManager: [Part {0}] ContentRange validation passed", partNumber); + + // Validate ETag consistency for SEP compliance + if (!string.IsNullOrEmpty(_savedETag) && !string.Equals(_savedETag, response.ETag, StringComparison.OrdinalIgnoreCase)) + { + Logger.Error(null, "MultipartDownloadManager: [Part {0}] ETag mismatch detected - object modified during download", partNumber); + throw new InvalidOperationException($"ETag mismatch detected for part {partNumber} - object may have been modified during download"); + } + + Logger.DebugFormat("MultipartDownloadManager: [Part {0}] ETag validation passed", partNumber); + } + finally + { + _httpConcurrencySlots.Release(); + Logger.DebugFormat("MultipartDownloadManager: [Part {0}] HTTP concurrency slot released (Available: {1}/{2})", + partNumber, _httpConcurrencySlots.CurrentCount, _config.ConcurrentServiceRequests); + } + + Logger.DebugFormat("MultipartDownloadManager: [Part {0}] Starting buffering", partNumber); + + // Delegate data handling to the handler + await _dataHandler.ProcessPartAsync(partNumber, response, cancellationToken).ConfigureAwait(false); + + Logger.DebugFormat("MultipartDownloadManager: [Part {0}] Buffering completed successfully", partNumber); + } + catch (Exception ex) + { + Logger.Error(ex, "MultipartDownloadManager: [Part {0}] Download failed", partNumber); + // Release capacity on failure + _dataHandler.ReleaseCapacity(); + throw; + } + finally + { + // Always dispose the response since we never transfer ownership + response?.Dispose(); + } + } + + + private async Task DiscoverUsingPartStrategyAsync(CancellationToken cancellationToken) + { + // SEP Part GET Step 1: "create a new GetObject request copying all fields in DownloadRequest. + // Set partNumber to 1." + var firstPartRequest = CreateGetObjectRequest(); + firstPartRequest.PartNumber = 1; + + // SEP Part GET Step 2: "send the request and wait for the response in a non-blocking fashion" + var firstPartResponse = await _s3Client.GetObjectAsync(firstPartRequest, cancellationToken).ConfigureAwait(false); + + // SEP Part GET Step 3: Save ETag for later IfMatch validation in subsequent requests + _savedETag = firstPartResponse.ETag; + + // SEP Part GET Step 3: "check the response. First parse total content length from ContentRange + // of the GetObject response and save the value in a variable. The length is the numeric value + // after / delimiter. For example, given ContentRange=bytes 0-1/5, 5 is the total content length. + // Then check PartsCount." + if (firstPartResponse.PartsCount.HasValue && firstPartResponse.PartsCount.Value > 1) + { + // SEP Part GET Step 3: "If PartsCount in the response is larger than 1, it indicates there + // are more parts available to download. The S3 Transfer Manager MUST save etag from the + // response to a variable." 
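+                // Illustrative response (hypothetical values): a 52,428,800-byte object
+                // uploaded in 8 MiB parts answers the PartNumber=1 request with
+                // PartsCount = 7 and ContentRange = "bytes 0-8388607/52428800", from which
+                // 52428800 is parsed below as the total object size.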
+ _discoveredPartCount = firstPartResponse.PartsCount.Value; + + // Parse total content length from ContentRange header + // For example, "bytes 0-5242879/52428800" -> extract 52428800 + var totalObjectSize = ExtractTotalSizeFromContentRange(firstPartResponse.ContentRange); + + // SEP Part GET Step 7 will use this response for creating DownloadResponse + // Keep the response with its stream (will be buffered in StartDownloadsAsync) + return new DownloadDiscoveryResult + { + TotalParts = firstPartResponse.PartsCount.Value, + ObjectSize = totalObjectSize, + InitialResponse = firstPartResponse // Keep response with stream + }; + } + else + { + // SEP Part GET Step 3: "If PartsCount is 1, go to Step 7." + _discoveredPartCount = 1; + + // Single part upload - return the response for immediate use (SEP Step 7) + return new DownloadDiscoveryResult + { + TotalParts = 1, + ObjectSize = firstPartResponse.ContentLength, + InitialResponse = firstPartResponse // Keep response with stream + }; + } + } + + private async Task DiscoverUsingRangeStrategyAsync(CancellationToken cancellationToken) + { + // Get target part size for RANGE strategy (already set in config from request or default) + var targetPartSize = _config.TargetPartSizeBytes; + + // SEP Ranged GET Step 1: "create a new GetObject request copying all fields in the original request. + // Set range value to bytes=0-{targetPartSizeBytes-1} to request the first part." + var firstRangeRequest = CreateGetObjectRequest(); + firstRangeRequest.ByteRange = new ByteRange(0, targetPartSize - 1); + + // SEP Ranged GET Step 2: "send the request and wait for the response in a non-blocking fashion" + var firstRangeResponse = await _s3Client.GetObjectAsync(firstRangeRequest, cancellationToken).ConfigureAwait(false); + + // SEP Ranged GET Step 5: "save Etag from the response to a variable" + // (for IfMatch validation in subsequent requests) + _savedETag = firstRangeResponse.ETag; + + // SEP Ranged GET Step 3: "parse total content length from ContentRange of the GetObject response + // and save the value in a variable. The length is the numeric value after / delimiter. + // For example, given ContentRange=bytes0-1/5, 5 is the total content length." + // Check if ContentRange is null (object smaller than requested range) + if (firstRangeResponse.ContentRange == null) + { + // No ContentRange means we got the entire small object + _discoveredPartCount = 1; + + return new DownloadDiscoveryResult + { + TotalParts = 1, + ObjectSize = firstRangeResponse.ContentLength, + InitialResponse = firstRangeResponse // Keep response with stream + }; + } + + + // Parse total object size from ContentRange (e.g., "bytes 0-5242879/52428800" -> 52428800) + var totalContentLength = ExtractTotalSizeFromContentRange(firstRangeResponse.ContentRange); + + // SEP Ranged GET Step 4: "compare the parsed total content length from Step 3 with ContentLength + // of the response. If the parsed total content length equals to the value from ContentLength, + // it indicates this request contains all of the data. The request is finished, return the response." 
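+            // Illustrative comparison (hypothetical values): a 3,145,728-byte object
+            // requested with bytes=0-8388607 returns ContentLength = 3145728 and
+            // ContentRange = "bytes 0-3145727/3145728"; the parsed total equals
+            // ContentLength, so this single response already contains the whole object.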
+ if (totalContentLength == firstRangeResponse.ContentLength) + { + // Single part: total size equals returned ContentLength + // This request contains all of the data + _discoveredPartCount = 1; + + return new DownloadDiscoveryResult + { + TotalParts = 1, + ObjectSize = totalContentLength, + InitialResponse = firstRangeResponse // Keep response with stream + }; + } + + // SEP Ranged GET Step 4: "If they do not match, it indicates there are more parts available + // to download. Add a validation to verify that ContentLength equals to the targetPartSizeBytes." + if (firstRangeResponse.ContentLength != targetPartSize) + { + throw new InvalidOperationException( + $"Expected first part size {targetPartSize} bytes, but received {firstRangeResponse.ContentLength} bytes. " + + $"Total object size is {totalContentLength} bytes."); + } + + // SEP Ranged GET Step 5: "calculate number of requests required by performing integer division + // of total contentLength/targetPartSizeBytes. Save the number of ranged GET requests in a variable." + _discoveredPartCount = (int)Math.Ceiling((double)totalContentLength / targetPartSize); + + // SEP Ranged GET Step 9 will use this response for creating DownloadResponse + // Keep the response with its stream (will be buffered in StartDownloadsAsync) + return new DownloadDiscoveryResult + { + TotalParts = _discoveredPartCount, + ObjectSize = totalContentLength, + InitialResponse = firstRangeResponse // Keep response with stream + }; + } + + private GetObjectRequest CreateGetObjectRequest() + { + var request = RequestMapper.MapToGetObjectRequest(_request); + + // Attach user agent handler if provided + if (_requestEventHandler != null) + { + ((Amazon.Runtime.Internal.IAmazonWebServiceRequest)request) + .AddBeforeRequestHandler(_requestEventHandler); + } + + return request; + } + + internal (long startByte, long endByte) CalculatePartRange(int partNumber, long objectSize) + { + var targetPartSize = _config.TargetPartSizeBytes; + + var startByte = (partNumber - 1) * targetPartSize; + var endByte = Math.Min(startByte + targetPartSize - 1, objectSize - 1); + return (startByte, endByte); + } + + internal (long startByte, long endByte, long totalSize) ParseContentRange(string contentRange) + { + if (string.IsNullOrEmpty(contentRange)) + throw new InvalidOperationException("Content-Range header is missing"); + + // Format: "bytes {start}-{end}/{total-size}" + var parts = contentRange.Replace("bytes ", "").Split('/'); + if (parts.Length != 2) + throw new InvalidOperationException($"Invalid ContentRange format: {contentRange}"); + + // Parse byte range + var rangeParts = parts[0].Split('-'); + if (rangeParts.Length != 2 || + !long.TryParse(rangeParts[0], out var startByte) || + !long.TryParse(rangeParts[1], out var endByte)) + throw new InvalidOperationException($"Unable to parse ContentRange byte range: {contentRange}"); + + // Parse total size - S3 always returns exact sizes, never wildcards + if (parts[1] == "*") + throw new InvalidOperationException($"Unexpected wildcard in ContentRange total size: {contentRange}. 
S3 always returns exact object sizes."); + if (!long.TryParse(parts[1], out var totalSize)) + throw new InvalidOperationException($"Unable to parse ContentRange total size: {contentRange}"); + + return (startByte, endByte, totalSize); + } + + internal long ExtractTotalSizeFromContentRange(string contentRange) + { + var (_, _, totalSize) = ParseContentRange(contentRange); + return totalSize; + } + + internal void ValidateContentRange(GetObjectResponse response, int partNumber, long objectSize) + { + // Ranged GET Step 7: + // "validate that ContentRange matches with the requested range" + if (_request.MultipartDownloadType == MultipartDownloadType.RANGE) + { + var (expectedStartByte, expectedEndByte) = CalculatePartRange(partNumber, objectSize); + + // Parse actual ContentRange from response using unified helper + if (string.IsNullOrEmpty(response.ContentRange)) + { + throw new InvalidOperationException($"ContentRange header missing from part {partNumber} response"); + } + + var (actualStartByte, actualEndByte, _) = ParseContentRange(response.ContentRange); + + // Validate range matches what we requested + if (actualStartByte != expectedStartByte || actualEndByte != expectedEndByte) + { + throw new InvalidOperationException( + $"ContentRange mismatch for part {partNumber}. " + + $"Expected: bytes {expectedStartByte}-{expectedEndByte}, " + + $"Actual: bytes {actualStartByte}-{actualEndByte}"); + } + } + + // TODO in future for file based download it also says + // Applicable to destinations to which the SDK writes parts parallelly, e.g., a file + // the content range of the response aligns with the starting offset of the destination to which the SDK writes the part. For example, given a part with content range of bytes 8388608-16777215/33555032, + // it should be written to the file from offset 8,388,608 to 1,6777,215. + } + + private void ThrowIfDisposed() + { + if (_disposed) + throw new ObjectDisposedException(nameof(MultipartDownloadManager)); + } + + #region Dispose Pattern + + /// + [SuppressMessage("Design", "CA1031:Do not catch general exception types", Justification = "Dispose methods should not throw exceptions")] + public void Dispose() + { + if (!_disposed) + { + try + { + _httpConcurrencySlots?.Dispose(); + _dataHandler?.Dispose(); + } + catch (Exception) + { + // Suppressing CA1031: Dispose methods should not throw exceptions + // Continue disposal process silently on any errors + } + + _disposed = true; + } + } + + #endregion + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/OpenStreamWithResponseCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/OpenStreamWithResponseCommand.cs new file mode 100644 index 000000000000..7d5b8258c2f8 --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/OpenStreamWithResponseCommand.cs @@ -0,0 +1,48 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ +using System; +using System.Collections.Generic; +using System.IO; +using System.Text; + +using Amazon.S3; +using Amazon.S3.Model; + +namespace Amazon.S3.Transfer.Internal +{ + /// + /// Enhanced OpenStream command that uses BufferedMultipartStream for improved multipart download handling. + /// + internal partial class OpenStreamWithResponseCommand : BaseCommand + { + IAmazonS3 _s3Client; + TransferUtilityOpenStreamRequest _request; + TransferUtilityConfig _config; + + internal OpenStreamWithResponseCommand(IAmazonS3 s3Client, TransferUtilityOpenStreamRequest request, TransferUtilityConfig config) + { + this._s3Client = s3Client; + this._request = request; + this._config = config; + } + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/PartBufferManager.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/PartBufferManager.cs new file mode 100644 index 000000000000..57d700363eb0 --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/PartBufferManager.cs @@ -0,0 +1,613 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ +using Amazon.Runtime.Internal.Util; +using System; +using System.Collections.Concurrent; +using System.Diagnostics.CodeAnalysis; +using System.Threading; +using System.Threading.Tasks; + +namespace Amazon.S3.Transfer.Internal +{ + /// + /// Manages buffered parts for multipart downloads with memory flow control and sequential consumption. + /// Implements a producer-consumer pattern where download tasks produce buffered parts and the read stream consumes them in order. + /// + /// + /// This class coordinates concurrent multipart downloads while ensuring sequential reading and bounded memory usage. + /// + /// SYNCHRONIZATION PRIMITIVES AND THEIR PURPOSES: + /// + /// 1. _nextExpectedPartNumber (int) + /// - Purpose: Tracks which part to read next, ensuring sequential consumption + /// - Synchronization: None required - only accessed by the consumer thread + /// - Updates: Simple increment (++) after consuming each part + /// - Reads: Direct reads are safe - int reads are naturally atomic + /// - Why no synchronization needed: Producer threads never access this field, + /// only the single consumer thread reads and writes it sequentially + /// + /// 2. 
_completionState (volatile of bool and ) + /// - Purpose: Atomically tracks download completion status and any error + /// - Synchronization: volatile keyword + atomic reference assignment + /// - Why combined: _downloadComplete and _downloadException must be read together + /// consistently. reference assignment is atomic in .NET (prevents partial reads). + /// - Reads: Direct volatile read gets both values atomically + /// - Writes: Simple assignment is atomic for references, volatile ensures visibility + /// + /// 3. _bufferSpaceAvailable (slot counter) + /// - Purpose: Flow control to limit memory usage by limiting concurrent buffered parts + /// - Capacity: Set to (e.g., 10 parts) + /// - Example: If 10 parts are buffered in memory and part 1 is still being read, a download + /// task attempting to buffer part 11 must wait. Once part 1 is consumed and disposed, + /// its buffer slot is released, allowing part 11 to be buffered. + /// - Critical: Prevents unbounded memory growth during large multipart downloads + /// + /// 4. _partAvailable (signal for readers) + /// - Purpose: Signals when new parts are added or download completes + /// - Signaled by: AddBufferAsync (when new part added), MarkDownloadComplete (when done) + /// - Waited on by: ReadFromCurrentPartAsync (when expected part not yet available) + /// - Example: Reader waits for part 3. When a download task adds part 3 to the dictionary, + /// it signals this event, waking the waiting reader to proceed. + /// - Automatically resets after waking one waiting reader + /// + /// 5. _partDataSources (dictionary storing parts) + /// - Purpose: Thread-safe storage of buffered part data indexed by part number + /// - Key: Part number (allows quickly finding the next part to read) + /// - Example: Download tasks 1-10 run concurrently, each adding their buffered part to the + /// dictionary when ready. The reader sequentially consumes part 1, then 2, then 3, etc., + /// even if they arrived in a different order (e.g., 3, 1, 5, 2, 4). + /// + /// PRODUCER-CONSUMER FLOW: + /// + /// Producer Flow (Download Tasks buffering parts): + /// 1. Wait for buffer space: await + /// - Blocks if are already buffered in memory + /// - Example: With MaxInMemoryParts=10, if parts 5-14 are buffered, the task downloading + /// part 15 blocks here until the reader consumes and releases part 5's buffer + /// 2. Read part data from S3 into pooled buffer + /// 3. Add buffered part: await + /// - Adds buffer to _partDataSources dictionary + /// - Signals _partAvailable to wake consumer if waiting + /// 4. Consumer eventually releases the buffer slot after reading the part + /// + /// Consumer Flow (Read Stream reading parts sequentially): + /// 1. Check if expected part (_nextExpectedPartNumber) is available in dictionary + /// 2. If not available, wait on _partAvailable event + /// - Example: Waiting for part 2, even if parts 3, 5, 7 are already available + /// - Also checks for download completion while waiting to detect end-of-file + /// 3. Once available, read from the part's buffer sequentially + /// 4. When part is fully read ( = true): + /// - Remove part from dictionary + /// - Dispose data source (returns buffer to ArrayPool) + /// - Call (frees slot for producer to buffer next part) + /// - Increment _nextExpectedPartNumber (simple increment, no synchronization needed) + /// 5. 
Continue to next part to fill caller's buffer across part boundaries if needed + /// + /// SEQUENTIAL GUARANTEE: + /// The _nextExpectedPartNumber field ensures parts are consumed in order, even when they + /// arrive out of order. The consumer always waits for the next sequential part before + /// reading, regardless of what other parts are already buffered. + /// + /// Example scenario with 5-part download: + /// - Download order: Part 3 arrives, then 1, then 5, then 2, then 4 + /// - Parts in dictionary: {3, 1, 5} then {3, 1, 5, 2} then {3, 1, 5, 2, 4} + /// - Reader consumption order: Waits for 1, reads 1, advances to 2, waits for 2, reads 2, + /// advances to 3, reads 3 (already available), advances to 4, waits for 4, etc. + /// - Final read order: 1, 2, 3, 4, 5 (sequential, regardless of arrival order) + /// + /// MEMORY MANAGEMENT: + /// This bounded buffer approach prevents memory exhaustion on large files: + /// - Without flow control: All parts could be buffered simultaneously (e.g., 1000 parts × 10MB = 10GB) + /// - With flow control (=10): Maximum 10 parts buffered (10 × 10MB = 100MB) + /// - The semaphore creates backpressure on download tasks when memory limit is reached + /// + internal class PartBufferManager : IPartBufferManager + { + #region Private members + + // Stores buffered parts by their part number so we can quickly find them. + // Example: If parts arrive as 3, 1, 5, they're stored as {3: buffer3, 1: buffer1, 5: buffer5} + // but consumed in order: 1, 2 (wait), 3, 4 (wait), 5. + private readonly ConcurrentDictionary _partDataSources; + + // Limits how many parts can be buffered in memory at once. + // Capacity set to MaxInMemoryParts (e.g., 10 parts). Download tasks wait here + // before buffering new parts if the limit is reached. Consumers release slots + // after disposing consumed part buffers. + // Example: With limit=10, if parts 1-10 are buffered and part 1 is being read, + // the download task for part 11 blocks here. Once part 1 is consumed and its + // buffer returned to the pool via ReleaseBufferSpace(), part 11 can be buffered. + private readonly SemaphoreSlim _bufferSpaceAvailable; + + // Signals when new parts are added or download completes. + // Automatically resets after waking one waiting reader. + // Signaled by: AddDataSource when new part added, MarkDownloadComplete when finished. + // Waited on by: ReadFromCurrentPartAsync when expected part not yet available. + // Example: Reader waits for part 4. When download task adds part 4, it signals + // this event, immediately waking the reader to proceed with consumption. + private readonly AutoResetEvent _partAvailable; + + // Tracks the next part number to consume sequentially. Ensures in-order reading. + // SYNCHRONIZATION: None required - only accessed by the consumer thread + // Consumer advances this after fully consuming each part with simple increment. + // Example: Set to 1 initially. After reading part 1, incremented to 2. + // Even if part 5 is available, consumer waits for part 2 before proceeding. + // + // Why no synchronization: + // - Only the consumer thread (calling ReadAsync) ever reads or writes this field + // - Producer threads (download tasks) never access it - they only write to the dictionary + // - No concurrent access means no need for volatile, Interlocked, or locks + private int _nextExpectedPartNumber = 1; + + // Stores download completion status and any error as an atomic unit. 
+ // SYNCHRONIZATION: volatile keyword + atomic reference assignment + // Item1: bool indicating if download is complete + // Item2: if download failed, null if successful + // + // Why instead of separate fields: + // - Reference assignment is atomic in .NET (prevents partial reads) + // - volatile ensures all threads see the latest instance + // - Reading the tuple gives us both values consistently in a single atomic operation + // - No race condition where we read complete equals true but exception has not been set yet + // + // Usage: + // Read: var state = _completionState; if (state.Item1) then check state.Item2 for error + // Write: _completionState = Tuple.Create(true, exception); + private volatile Tuple _completionState = Tuple.Create(false, (Exception)null); + + private bool _disposed = false; + + #endregion + + #region Logger + + private Logger Logger + { + get + { + return Logger.GetLogger(typeof(TransferUtility)); + } + } + + #endregion + + /// + /// Initializes a new instance of the class. + /// + /// The with buffer management settings. + /// Thrown when is null. + public PartBufferManager(BufferedDownloadConfiguration config) + { + if (config == null) + throw new ArgumentNullException(nameof(config)); + + _partDataSources = new ConcurrentDictionary(); + _bufferSpaceAvailable = new SemaphoreSlim(config.MaxInMemoryParts); + _partAvailable = new AutoResetEvent(false); + + Logger.DebugFormat("PartBufferManager initialized with MaxInMemoryParts={0}", config.MaxInMemoryParts); + } + + /// + public int NextExpectedPartNumber + { + get + { + // Direct read is safe - only the consumer thread accesses this field + // No synchronization needed: int reads are naturally atomic on all platforms + return _nextExpectedPartNumber; + } + } + + /// + /// + /// This method is called by download tasks before buffering a new part. If + /// are already buffered, the task blocks here until the consumer reads and disposes a part, + /// freeing a slot via . + /// + /// Example: With MaxInMemoryParts=10: + /// - Parts 1-10 are buffered in memory + /// - Download task for part 11 calls this method and blocks + /// - Consumer reads and completes part 1, calls + /// - This method returns, allowing part 11 to be buffered + /// + public async Task WaitForBufferSpaceAsync(CancellationToken cancellationToken) + { + ThrowIfDisposed(); + + var availableBefore = _bufferSpaceAvailable.CurrentCount; + Logger.DebugFormat("PartBufferManager: Waiting for buffer space (Available slots before wait: {0})", availableBefore); + + await _bufferSpaceAvailable.WaitAsync(cancellationToken).ConfigureAwait(false); + + var availableAfter = _bufferSpaceAvailable.CurrentCount; + Logger.DebugFormat("PartBufferManager: Buffer space acquired (Available slots after acquire: {0})", availableAfter); + } + + /// + /// Adds a part data source to the dictionary and signals waiting consumers. + /// + /// The to add. + /// Thrown when is null. + /// Thrown when attempting to add a duplicate part number. + /// + /// This method is thread-safe and can be called concurrently by multiple download tasks. + /// After adding the part to the dictionary, it signals _partAvailable to wake any consumer + /// waiting for this specific part number. 
+ /// + /// Example: Download tasks for parts 3, 1, 5 all call this concurrently: + /// - Each adds to dictionary with their part number as key + /// - Each signals _partAvailable + /// - Consumer waiting for part 1 wakes up when part 1 is added + /// + public void AddDataSource(IPartDataSource dataSource) + { + ThrowIfDisposed(); + + if (dataSource == null) + throw new ArgumentNullException(nameof(dataSource)); + + Logger.DebugFormat("PartBufferManager: Adding part {0} (BufferedParts count before add: {1})", + dataSource.PartNumber, _partDataSources.Count); + + // Add the data source to the collection + if (!_partDataSources.TryAdd(dataSource.PartNumber, dataSource)) + { + // Duplicate part number - this shouldn't happen in normal operation + Logger.Error(null, "PartBufferManager: Duplicate part {0} attempted to be added", dataSource.PartNumber); + dataSource?.Dispose(); // Clean up the duplicate part + throw new InvalidOperationException($"Duplicate part {dataSource.PartNumber} attempted to be added"); + } + + Logger.DebugFormat("PartBufferManager: Part {0} added successfully (BufferedParts count after add: {1}). Signaling _partAvailable.", + dataSource.PartNumber, _partDataSources.Count); + + // Signal that a new part is available + _partAvailable.Set(); + } + + /// + public async Task AddBufferAsync(StreamPartBuffer buffer, CancellationToken cancellationToken) + { + ThrowIfDisposed(); + + if (buffer == null) + throw new ArgumentNullException(nameof(buffer)); + + // Create a BufferedDataSource and add it + var bufferedSource = new BufferedDataSource(buffer); + AddDataSource(bufferedSource); + } + + /// + public async Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) + { + ThrowIfDisposed(); + + if (buffer == null) + throw new ArgumentNullException(nameof(buffer)); + if (offset < 0) + throw new ArgumentOutOfRangeException(nameof(offset), "Offset must be non-negative"); + if (count < 0) + throw new ArgumentOutOfRangeException(nameof(count), "Count must be non-negative"); + if (offset + count > buffer.Length) + throw new ArgumentException("Offset and count exceed buffer bounds"); + + int totalBytesRead = 0; + + // Keep reading until buffer is full or we reach true EOF + // Note: We read across part boundaries to fill the buffer completely, matching standard Stream.Read() behavior + while (totalBytesRead < count) + { + var (bytesRead, shouldContinue) = await ReadFromCurrentPartAsync( + buffer, + offset + totalBytesRead, + count - totalBytesRead, + cancellationToken).ConfigureAwait(false); + + totalBytesRead += bytesRead; + + if (!shouldContinue) + break; + } + + return totalBytesRead; + } + + /// + /// Reads from the current expected part, handling availability, consumption, and cleanup. + /// Returns (bytesRead, shouldContinue) where shouldContinue indicates if more data is available. 
+ /// + /// + /// This method implements the core sequential consumption logic with these responsibilities: + /// - Waiting for the next expected part to arrive (even if later parts already available) + /// - Reading data from the part's buffer + /// - Cleaning up completed parts (disposing buffer, releasing slot, advancing counter) + /// - Detecting download completion and errors + /// + /// SEQUENTIAL CONSUMPTION EXAMPLE: + /// Scenario: Downloading 5-part file, parts arrive out of order + /// + /// Initial state: _nextExpectedPartNumber = 1, dictionary is empty + /// + /// Step 1: Part 3 arrives first + /// - Dictionary: {3: buffer3} + /// - Consumer calls this method, expects part 1 + /// - ContainsKey(1) = false, enters wait loop + /// - Waits on _partAvailable event + /// + /// Step 2: Part 1 arrives + /// - Dictionary: {3: buffer3, 1: buffer1} + /// - AddDataSourceAsync signals _partAvailable + /// - Consumer wakes up, checks ContainsKey(1) = true, exits wait loop + /// - Reads from part 1's buffer + /// - Part 1 becomes complete (IsComplete = true) + /// - Removes part 1 from dictionary: {3: buffer3} + /// - Disposes buffer (returns to ArrayPool) + /// - Releases buffer slot (ReleaseBufferSpace) + /// - Increments counter: _nextExpectedPartNumber = 2 + /// - Returns (bytesRead, shouldContinue=true) to fill more of caller's buffer + /// + /// Step 3: Next iteration, now expecting part 2 + /// - Dictionary: {3: buffer3} + /// - ContainsKey(2) = false, enters wait loop again + /// - Waits for part 2, even though part 3 is already available + /// + /// This continues until all parts are consumed in order: 1, 2, 3, 4, 5 + /// + private async Task<(int bytesRead, bool shouldContinue)> ReadFromCurrentPartAsync( + byte[] buffer, + int offset, + int count, + CancellationToken cancellationToken) + { + var currentPartNumber = _nextExpectedPartNumber; + + Logger.DebugFormat("PartBufferManager.ReadFromCurrentPart: Expecting part {0} (Requested bytes: {1}, BufferedParts count: {2})", + currentPartNumber, count, _partDataSources.Count); + + // Wait for the current part to become available. + // This loop handles out-of-order part arrival - we always wait for the next + // sequential part (_nextExpectedPartNumber) before proceeding, even if later + // parts are already available in the dictionary. + // Example: If parts 3, 5, 7 are available but we need part 2, we wait here. + while (!_partDataSources.ContainsKey(currentPartNumber)) + { + Logger.DebugFormat("PartBufferManager.ReadFromCurrentPart: Part {0} not yet available. Waiting on _partAvailable event...", + currentPartNumber); + + // Check for completion first to avoid indefinite waiting. + var state = _completionState; + if (state.Item1) // Check if download is complete + { + if (state.Item2 != null) // Check for exception + { + Logger.Error(state.Item2, "PartBufferManager.ReadFromCurrentPart: Download failed while waiting for part {0}", + currentPartNumber); + throw new InvalidOperationException("Multipart download failed", state.Item2); + } + + Logger.DebugFormat("PartBufferManager.ReadFromCurrentPart: Download complete, part {0} not available. Returning EOF.", + currentPartNumber); + // True EOF - all parts downloaded, no more data coming + return (0, false); + } + + // Wait for a part to become available. + // _partAvailable is signaled by: + // 1. AddDataSourceAsync when a new part is added to the dictionary + // 2. MarkDownloadComplete when all download tasks finish + // + // Example: Waiting for part 2. 
When download task completes buffering part 2 + // and calls AddDataSourceAsync, it signals this event, waking us to check again. + await Task.Run(() => _partAvailable.WaitOne(), cancellationToken).ConfigureAwait(false); + + Logger.DebugFormat("PartBufferManager.ReadFromCurrentPart: Woke from _partAvailable wait. Rechecking for part {0}...", + currentPartNumber); + } + + Logger.DebugFormat("PartBufferManager.ReadFromCurrentPart: Part {0} is available. Reading from data source...", + currentPartNumber); + + // At this point, the expected part is available in the dictionary. + // Double-check with TryGetValue for safety (handles rare race conditions). + if (!_partDataSources.TryGetValue(currentPartNumber, out var dataSource)) + { + // Log technical details for troubleshooting + Logger.Error(null, "PartBufferManager: Part {0} disappeared after availability check. This indicates a race condition in the buffer manager.", currentPartNumber); + + // Throw user-friendly exception + throw new InvalidOperationException("Multipart download failed due to an internal error."); + } + + try + { + // Read from this part's buffer into the caller's buffer. + var partBytesRead = await dataSource.ReadAsync(buffer, offset, count, cancellationToken).ConfigureAwait(false); + + Logger.DebugFormat("PartBufferManager.ReadFromCurrentPart: Read {0} bytes from part {1}. IsComplete={2}", + partBytesRead, currentPartNumber, dataSource.IsComplete); + + // If this part is fully consumed, perform cleanup and advance to next part. + if (dataSource.IsComplete) + { + Logger.DebugFormat("PartBufferManager.ReadFromCurrentPart: Part {0} is complete. Cleaning up and advancing to next part...", + currentPartNumber); + + // Remove from collection + _partDataSources.TryRemove(currentPartNumber, out _); + + // Clean up the data source (returns buffer to ArrayPool) + dataSource.Dispose(); + + // Release buffer space slot (allows producer to buffer the next part). + // This is critical for flow control - without this release, download tasks + // would eventually block waiting for space, even though we've consumed this part. + // Example: After consuming part 1 (freeing its slot), download task can now + // buffer part 11 if parts 2-10 are still being held. + ReleaseBufferSpace(); + + // Advance to next part. + _nextExpectedPartNumber++; + + Logger.DebugFormat("PartBufferManager.ReadFromCurrentPart: Cleaned up part {0}. Next expected part: {1} (BufferedParts count: {2})", + currentPartNumber, _nextExpectedPartNumber, _partDataSources.Count); + + // Continue reading to fill buffer across part boundaries. + // This matches standard Stream.Read() behavior where we attempt to + // fill the caller's buffer completely before returning, even if it + // requires reading from multiple parts. + // Example: Caller requests 20MB, part 1 has 5MB remaining. We return + // (5MB, shouldContinue=true), then on next iteration read from part 2 + // to try to fill the remaining 15MB. + return (partBytesRead, true); + } + + // If part is not complete but we got 0 bytes, it's EOF + if (partBytesRead == 0) + { + Logger.DebugFormat("PartBufferManager.ReadFromCurrentPart: Part {0} returned 0 bytes (EOF)", currentPartNumber); + return (0, false); + } + + Logger.DebugFormat("PartBufferManager.ReadFromCurrentPart: Part {0} has more data. Returning {1} bytes (will resume on next call)", + currentPartNumber, partBytesRead); + + // Part still has more data available. Return what we read. + // We'll resume from this part on the next ReadAsync call. 
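+                // Illustrative sequence (hypothetical sizes): part 2 buffered 8 MiB and the
+                // caller reads 64 KiB per call; each call lands here returning 64 KiB until
+                // the part's data is exhausted and IsComplete turns true on a later read.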
+ return (partBytesRead, false); + } + catch (Exception ex) + { + Logger.Error(ex, "PartBufferManager.ReadFromCurrentPart: Error reading from part {0}: {1}", + currentPartNumber, ex.Message); + + // Clean up on failure to prevent resource leaks + dataSource?.Dispose(); + ReleaseBufferSpace(); + throw; + } + } + + /// + /// + /// Called by the consumer after fully reading and disposing a buffered part. + /// This method releases a slot in the _bufferSpaceAvailable semaphore, which may + /// unblock a download task waiting in . + /// + /// FLOW CONTROL CYCLE: + /// 1. Download task blocks in (slot count = 0) + /// 2. Consumer reads and completes a part + /// 3. Consumer calls this method (slot count = 1) + /// 4. Download task unblocks and can buffer next part + /// + /// Example: With =10, after consuming part 1, this allows part 11 to be buffered. + /// + public void ReleaseBufferSpace() + { + ThrowIfDisposed(); + + // Release buffer space when a consumer finishes with a part + _bufferSpaceAvailable.Release(); + + var availableAfter = _bufferSpaceAvailable.CurrentCount; + Logger.DebugFormat("PartBufferManager: Buffer space released (Available slots after release: {0})", availableAfter); + } + + /// + /// + /// Called by the download coordinator when all download tasks have finished. + /// This signals to the consumer that no more parts will arrive, allowing it to + /// detect end-of-file correctly even if waiting for a part that will never come. + /// + /// SYNCHRONIZATION: Simple assignment is safe because: + /// 1. Reference assignments are atomic in .NET + /// 2. volatile keyword ensures the new is immediately visible to all threads + /// 3. No lock needed - atomicity comes from single reference write + /// + /// Example: All 5 parts downloaded successfully + /// - Download coordinator calls MarkDownloadComplete(null) + /// - Creates new Tuple(true, null) and assigns atomically + /// - Consumer waiting for non-existent part 6 wakes up + /// - Consumer reads atomically, sees Item1=true, Item2=null + /// - Consumer returns EOF (0 bytes) + /// + public void MarkDownloadComplete(Exception exception) + { + if (exception != null) + { + Logger.Error(exception, "PartBufferManager: Download marked complete with error. Signaling completion."); + } + else + { + Logger.DebugFormat("PartBufferManager: Download marked complete successfully. Signaling completion."); + } + + // Create and assign new completion state atomically + // No lock needed: reference assignment is atomic, volatile ensures visibility + _completionState = Tuple.Create(true, exception); + + // Signal that completion status has changed. + // This wakes any consumer waiting in ReadFromCurrentPartAsync to check completion. 
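+            // Illustrative failure path (hypothetical): if the task for part 4 throws, the
+            // coordinator calls MarkDownloadComplete(ex); a consumer blocked waiting for
+            // part 2 wakes on the signal below, reads Item1 = true / Item2 = ex, and
+            // surfaces the error instead of waiting forever.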
+ _partAvailable.Set(); + } + + private void ThrowIfDisposed() + { + if (_disposed) + throw new ObjectDisposedException(nameof(PartBufferManager)); + } + + #region Dispose Pattern + + /// + [SuppressMessage("Design", "CA1031:Do not catch general exception types", Justification = "Dispose methods should not throw exceptions")] + public void Dispose() + { + if (!_disposed) + { + try + { + // Clean up all data sources (both buffered and streaming) + foreach (var dataSource in _partDataSources.Values) + { + dataSource?.Dispose(); + } + _partDataSources.Clear(); + + // Clean up synchronization primitives + _bufferSpaceAvailable?.Dispose(); + _partAvailable?.Dispose(); + } + catch (Exception) + { + // Suppressing CA1031: Dispose methods should not throw exceptions + // Continue disposal process silently on any errors + } + + _disposed = true; + } + } + + #endregion + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/RequestMapper.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/RequestMapper.cs new file mode 100644 index 000000000000..fb22d6a9ff55 --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/RequestMapper.cs @@ -0,0 +1,92 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ + +using System; +using Amazon.S3.Model; + +namespace Amazon.S3.Transfer.Internal +{ + /// + /// Utility class for mapping Transfer Utility request objects to S3 request objects. + /// Centralizes request creation logic to ensure consistency across different commands. + /// + internal static class RequestMapper + { + /// + /// Maps a BaseDownloadRequest to GetObjectRequest. + /// Includes comprehensive property mappings for all supported download scenarios. 
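+        /// Example mapping (illustrative values): a download request for bucket
+        /// "amzn-s3-demo-bucket" and key "large.bin" with SSE-C settings yields a
+        /// GetObjectRequest carrying the same bucket, key, version, customer key material,
+        /// checksum mode, request payer and response-header overrides.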
+ /// + /// The BaseDownloadRequest to map from + /// A new GetObjectRequest with mapped fields + /// Thrown when request is null + internal static GetObjectRequest MapToGetObjectRequest(BaseDownloadRequest request) + { + if (request == null) + throw new ArgumentNullException(nameof(request)); + + var getRequest = new GetObjectRequest + { + BucketName = request.BucketName, + Key = request.Key, + VersionId = request.VersionId + }; + + // Map date conditions + if (request.IsSetModifiedSinceDate()) + { + getRequest.ModifiedSinceDate = request.ModifiedSinceDate; + } + if (request.IsSetUnmodifiedSinceDate()) + { + getRequest.UnmodifiedSinceDate = request.UnmodifiedSinceDate; + } + + // Map server-side encryption properties + getRequest.ServerSideEncryptionCustomerMethod = request.ServerSideEncryptionCustomerMethod; + getRequest.ServerSideEncryptionCustomerProvidedKey = request.ServerSideEncryptionCustomerProvidedKey; + getRequest.ServerSideEncryptionCustomerProvidedKeyMD5 = request.ServerSideEncryptionCustomerProvidedKeyMD5; + + // Map additional properties + getRequest.ChecksumMode = request.ChecksumMode; + getRequest.RequestPayer = request.RequestPayer; + + // Map ownership and ETag matching properties + if (request.IsSetExpectedBucketOwner()) + { + getRequest.ExpectedBucketOwner = request.ExpectedBucketOwner; + } + if (request.IsSetIfMatch()) + { + getRequest.EtagToMatch = request.IfMatch; + } + if (request.IsSetIfNoneMatch()) + { + getRequest.EtagToNotMatch = request.IfNoneMatch; + } + + // Map response header overrides + getRequest.ResponseHeaderOverrides = request.ResponseHeaderOverrides; + + return getRequest; + } + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/StreamPartBuffer.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/StreamPartBuffer.cs new file mode 100644 index 000000000000..a850a9f9ad38 --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/StreamPartBuffer.cs @@ -0,0 +1,175 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ +using System; +using System.Buffers; +using System.Diagnostics.CodeAnalysis; +using Amazon.S3.Model; + +namespace Amazon.S3.Transfer.Internal +{ + /// + /// Container for downloaded part data optimized for streaming scenarios. + /// Uses ArrayPool buffers and tracks reading position for sequential access + /// by BufferedMultipartStream. + /// + internal class StreamPartBuffer : IDisposable + { + private bool _disposed = false; + + /// + /// Gets or sets the part number for priority queue ordering. + /// For Part GET strategy: Uses the actual part number from the multipart upload. + /// For Range GET strategy: Calculated based on byte range position. 
+ /// + public int PartNumber { get; set; } + + /// + /// Gets or sets the ArrayPool buffer containing the downloaded part data. + /// Ownership belongs to this StreamPartBuffer and will be returned to pool on disposal. + /// + public byte[] ArrayPoolBuffer { get; set; } + + /// + /// Gets or sets the current reading position within the buffer. + /// Used by BufferedMultipartStream for sequential reading. + /// + public int CurrentPosition { get; set; } = 0; + + /// + /// Gets the number of bytes remaining to be read from current position. + /// + public int RemainingBytes => Math.Max(0, Length - CurrentPosition); + + /// + /// Gets or sets the length of valid data in the ArrayPool buffer. + /// The buffer may be larger than this due to ArrayPool size rounding. + /// + public int Length { get; set; } + + /// + /// Creates a new StreamPartBuffer for streaming scenarios. + /// For internal use only - external callers should use Create() factory method. + /// + /// The part number for ordering + /// The ArrayPool buffer containing the data (ownership transferred) + /// The length of valid data in the buffer + internal StreamPartBuffer(int partNumber, byte[] arrayPoolBuffer, int length) + { + PartNumber = partNumber; + ArrayPoolBuffer = arrayPoolBuffer; + Length = length; + CurrentPosition = 0; + } + + /// + /// Creates a new StreamPartBuffer with a rented ArrayPool buffer. + /// The StreamPartBuffer takes ownership and will return the buffer on disposal. + /// + /// The part number for ordering + /// Initial capacity needed for the buffer + /// A StreamPartBuffer with rented buffer ready for writing + public static StreamPartBuffer Create(int partNumber, int capacity) + { + var buffer = ArrayPool.Shared.Rent(capacity); + return new StreamPartBuffer(partNumber, buffer, 0); // Length will be set after writing + } + + /// + /// Sets the length of valid data in the buffer after writing. + /// Can only be called once to prevent state corruption. + /// + /// The number of valid bytes written to the buffer + /// Thrown if length has already been set + /// Thrown if length is negative or exceeds buffer capacity + internal void SetLength(int length) + { + if (Length > 0) + throw new InvalidOperationException("Length has already been set and cannot be changed"); + + if (length < 0) + throw new ArgumentOutOfRangeException(nameof(length), "Length must be non-negative"); + + if (ArrayPoolBuffer != null && length > ArrayPoolBuffer.Length) + throw new ArgumentOutOfRangeException(nameof(length), "Length exceeds buffer capacity"); + + Length = length; + } + + /// + /// Returns a string representation of this StreamPartBuffer for debugging. + /// + /// A string describing this stream part buffer + public override string ToString() + { + return $"StreamPartBuffer(Part={PartNumber}, ArrayPool={Length} bytes, pos={CurrentPosition}, remaining={RemainingBytes})"; + } + + #region IDisposable Implementation + + /// + /// Releases all resources used by this StreamPartBuffer. + /// + public void Dispose() + { + Dispose(true); + GC.SuppressFinalize(this); + } + + /// + /// Releases the unmanaged resources used by the StreamPartBuffer and optionally releases the managed resources. + /// Returns the ArrayPool buffer back to the shared pool. + /// + /// True to release both managed and unmanaged resources; false to release only unmanaged resources. 
+ [SuppressMessage("Design", "CA1031:Do not catch general exception types", Justification = "Dispose methods should not throw exceptions")] + protected virtual void Dispose(bool disposing) + { + if (!_disposed && disposing) + { + try + { + // Return ArrayPool buffer to shared pool + if (ArrayPoolBuffer != null) + { + ArrayPool.Shared.Return(ArrayPoolBuffer); + ArrayPoolBuffer = null; + } + } + catch (Exception) + { + + } + + _disposed = true; + } + } + + /// + /// Finalizer to ensure resources are cleaned up if Dispose is not called. + /// + ~StreamPartBuffer() + { + Dispose(false); + } + + #endregion + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/TaskHelpers.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/TaskHelpers.cs new file mode 100644 index 000000000000..7341157ce772 --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/TaskHelpers.cs @@ -0,0 +1,87 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; + +namespace Amazon.S3.Transfer.Internal +{ + /// + /// Provides helper methods for Task-based operations. + /// + internal static class TaskHelpers + { + /// + /// Waits for all tasks to complete or till any task fails or is canceled. + /// + /// List of tasks to wait for completion + /// Cancellation token to observe + /// A task that represents the completion of all tasks or the first exception + internal static async Task WhenAllOrFirstExceptionAsync(List pendingTasks, CancellationToken cancellationToken) + { + int processed = 0; + int total = pendingTasks.Count; + while (processed < total) + { + cancellationToken.ThrowIfCancellationRequested(); + + var completedTask = await Task.WhenAny(pendingTasks) + .ConfigureAwait(continueOnCapturedContext: false); + + //If RanToCompletion a response will be returned + //If Faulted or Canceled an appropriate exception will be thrown + await completedTask + .ConfigureAwait(continueOnCapturedContext: false); + + pendingTasks.Remove(completedTask); + processed++; + } + } + + /// + /// Waits for all tasks to complete or till any task fails or is canceled. + /// Returns results from all completed tasks. 
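+ /// 
+ /// For example (a hedged sketch; StartPartUploads is a hypothetical helper
+ /// that queues the uploads), a batch of part-upload tasks can be awaited so
+ /// that the first fault or cancellation surfaces immediately:
+ /// 
+ /// List<Task<UploadPartResponse>> pending = StartPartUploads();
+ /// List<UploadPartResponse> responses =
+ ///     await TaskHelpers.WhenAllOrFirstExceptionAsync(pending, cancellationToken);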
+ /// + /// The type of result returned by the tasks + /// List of tasks to wait for completion + /// Cancellation token to observe + /// A task that represents the completion of all tasks with their results, or the first exception + internal static async Task> WhenAllOrFirstExceptionAsync(List> pendingTasks, CancellationToken cancellationToken) + { + int processed = 0; + int total = pendingTasks.Count; + var responses = new List(); + while (processed < total) + { + cancellationToken.ThrowIfCancellationRequested(); + + var completedTask = await Task.WhenAny(pendingTasks) + .ConfigureAwait(continueOnCapturedContext: false); + + //If RanToCompletion a response will be returned + //If Faulted or Canceled an appropriate exception will be thrown + var response = await completedTask + .ConfigureAwait(continueOnCapturedContext: false); + responses.Add(response); + + pendingTasks.Remove(completedTask); + processed++; + } + + return responses; + } + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/AbortMultipartUploadsCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/AbortMultipartUploadsCommand.async.cs index c4ae5bb8b9e5..649c290fac2f 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/AbortMultipartUploadsCommand.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/AbortMultipartUploadsCommand.async.cs @@ -82,7 +82,7 @@ await asyncThrottler.WaitAsync(cancellationToken) } while (listResponse.IsTruncated.GetValueOrDefault()); - await WhenAllOrFirstExceptionAsync(pendingTasks,cancellationToken) + await TaskHelpers.WhenAllOrFirstExceptionAsync(pendingTasks,cancellationToken) .ConfigureAwait(continueOnCapturedContext: false); return new TransferUtilityAbortMultipartUploadsResponse(); diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/BaseCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/BaseCommand.async.cs index 65ee8d8cb4c8..83a828610bd2 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/BaseCommand.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/BaseCommand.async.cs @@ -31,58 +31,6 @@ internal abstract partial class BaseCommand where TResponse : class /// public abstract Task ExecuteAsync(CancellationToken cancellationToken); - /// - /// Waits for all of the tasks to complete or till any task fails or is canceled. - /// - protected static async Task> WhenAllOrFirstExceptionAsync(List> pendingTasks, CancellationToken cancellationToken) - { - int processed = 0; - int total = pendingTasks.Count; - var responses = new List(); - while (processed < total) - { - cancellationToken.ThrowIfCancellationRequested(); - - var completedTask = await Task.WhenAny(pendingTasks) - .ConfigureAwait(continueOnCapturedContext: false); - - //If RanToCompletion a response will be returned - //If Faulted or Canceled an appropriate exception will be thrown - var response = await completedTask - .ConfigureAwait(continueOnCapturedContext: false); - responses.Add(response); - - pendingTasks.Remove(completedTask); - processed++; - } - - return responses; - } - - /// - /// Waits for all of the tasks to complete or till any task fails or is canceled. 
- /// - protected static async Task WhenAllOrFirstExceptionAsync(List pendingTasks, CancellationToken cancellationToken) - { - int processed = 0; - int total = pendingTasks.Count; - while (processed < total) - { - cancellationToken.ThrowIfCancellationRequested(); - - var completedTask = await Task.WhenAny(pendingTasks) - .ConfigureAwait(continueOnCapturedContext: false); - - //If RanToCompletion a response will be returned - //If Faulted or Canceled an appropriate exception will be thrown - await completedTask - .ConfigureAwait(continueOnCapturedContext: false); - - pendingTasks.Remove(completedTask); - processed++; - } - } - protected static async Task ExecuteCommandAsync(BaseCommand command, CancellationTokenSource internalCts, SemaphoreSlim throttler) where T : class { try diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartUploadCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartUploadCommand.async.cs index 3f29336f0fe0..0ea2f205258a 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartUploadCommand.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartUploadCommand.async.cs @@ -133,7 +133,7 @@ await localThrottler.WaitAsync(cancellationToken) } Logger.DebugFormat("Waiting for upload part requests to complete. ({0})", initResponse.UploadId); - _uploadResponses = await WhenAllOrFirstExceptionAsync(pendingUploadPartTasks, cancellationToken) + _uploadResponses = await TaskHelpers.WhenAllOrFirstExceptionAsync(pendingUploadPartTasks, cancellationToken) .ConfigureAwait(continueOnCapturedContext: false); Logger.DebugFormat("Beginning completing multipart. ({0})", initResponse.UploadId); diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/OpenStreamCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/OpenStreamCommand.async.cs index 8c954d256fab..414e9a1641d1 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/OpenStreamCommand.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/OpenStreamCommand.async.cs @@ -32,7 +32,6 @@ public override async Task ExecuteAsync(Cance var response = await _s3Client.GetObjectAsync(getRequest, cancellationToken) .ConfigureAwait(continueOnCapturedContext: false); _responseStream = response.ResponseStream; - // TODO map and return response return new TransferUtilityOpenStreamResponse(); } } diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/OpenStreamWithResponseCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/OpenStreamWithResponseCommand.async.cs new file mode 100644 index 000000000000..0432c10f8bf7 --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/OpenStreamWithResponseCommand.async.cs @@ -0,0 +1,85 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +using Amazon.Runtime.Internal.Util; +using Amazon.S3.Model; +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Text; +using System.Threading; +using System.Threading.Tasks; + +namespace Amazon.S3.Transfer.Internal +{ + internal partial class OpenStreamWithResponseCommand : BaseCommand + { + private Logger Logger + { + get { return Logger.GetLogger(typeof(TransferUtility)); } + } + + public override async Task ExecuteAsync(CancellationToken cancellationToken) + { + Logger.DebugFormat("OpenStreamWithResponseCommand: Creating BufferedMultipartStream with MultipartDownloadType={0}", + _request.MultipartDownloadType); + + Logger.DebugFormat("OpenStreamWithResponseCommand: Configuration - ConcurrentServiceRequests={0}, MaxInMemoryParts={1}, BufferSize={2}", + _config.ConcurrentServiceRequests, + _config.MaxInMemoryParts, + _s3Client.Config.BufferSize + ); + + var bufferedStream = BufferedMultipartStream.Create(_s3Client, _request, _config, this.RequestEventHandler); + await bufferedStream.InitializeAsync(cancellationToken).ConfigureAwait(false); + + // Populate metadata from the initial GetObject response (from discovery phase) + var discoveryResult = bufferedStream.DiscoveryResult; + + Logger.DebugFormat("OpenStreamWithResponseCommand: Stream initialized successfully - ObjectSize={0}, TotalParts={1}, IsSinglePart={2}", + discoveryResult.ObjectSize, + discoveryResult.TotalParts, + discoveryResult.IsSinglePart); + + var response = ResponseMapper.MapGetObjectResponseToOpenStream(discoveryResult.InitialResponse); + + // SEP Part GET Step 7 / Ranged GET Step 9: + // Set ContentLength to total object size (not just first part) + response.Headers.ContentLength = discoveryResult.ObjectSize; + + // Set ContentRange to represent the entire object: bytes 0-(ContentLength-1)/ContentLength + response.ContentRange = $"bytes 0-{discoveryResult.ObjectSize - 1}/{discoveryResult.ObjectSize}"; + + // SEP Part GET Step 7 / Ranged GET Step 9: + // Handle composite checksums for multipart objects + // Per spec: "If ChecksumType is COMPOSITE, set all checksum value members to null + // as the checksum value returned from a part GET request is not the composite + // checksum for the entire object" + if (response.ChecksumType == ChecksumType.COMPOSITE) + { + response.ChecksumCRC32 = null; + response.ChecksumCRC32C = null; + response.ChecksumCRC64NVME = null; + response.ChecksumSHA1 = null; + response.ChecksumSHA256 = null; + } + + response.ResponseStream = bufferedStream; + return response; + + } + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs index 9382ab33b757..c4edfa090502 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs @@ -111,7 +111,7 @@ await asyncThrottler.WaitAsync(cancellationToken) pendingTasks.Add(task); } - await WhenAllOrFirstExceptionAsync(pendingTasks, cancellationToken) + await TaskHelpers.WhenAllOrFirstExceptionAsync(pendingTasks, cancellationToken) .ConfigureAwait(continueOnCapturedContext: false); return new TransferUtilityDownloadDirectoryResponse diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/UploadDirectoryCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/UploadDirectoryCommand.cs index 
10f09be9ed07..b848988a3ca2 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/UploadDirectoryCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/UploadDirectoryCommand.cs @@ -77,7 +77,7 @@ public override async Task ExecuteAsync( var task = ExecuteCommandAsync(uploadCommand, internalCts, loopThrottler); pendingTasks.Add(task); } - await WhenAllOrFirstExceptionAsync(pendingTasks, cancellationToken) + await TaskHelpers.WhenAllOrFirstExceptionAsync(pendingTasks, cancellationToken) .ConfigureAwait(continueOnCapturedContext: false); } finally diff --git a/sdk/src/Services/S3/Custom/Transfer/_async/ITransferUtility.async.cs b/sdk/src/Services/S3/Custom/Transfer/_async/ITransferUtility.async.cs index 938bebf7653e..6e2942baba23 100644 --- a/sdk/src/Services/S3/Custom/Transfer/_async/ITransferUtility.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/_async/ITransferUtility.async.cs @@ -327,6 +327,13 @@ public partial interface ITransferUtility : IDisposable /// Amazon S3 bucket and key. /// The caller of this method is responsible for closing the stream. /// + /// + /// + /// Note: Consider using + /// instead. The newer operation uses parallel downloads from S3 and memory buffering to improve performance, + /// and also returns response metadata along with the stream. + /// + /// /// /// The name of the bucket. /// @@ -344,6 +351,13 @@ public partial interface ITransferUtility : IDisposable /// specified by the TransferUtilityOpenStreamRequest. /// The caller of this method is responsible for closing the stream. /// + /// + /// + /// Note: Consider using + /// instead. The newer operation uses parallel downloads from S3 and memory buffering to improve performance, + /// and also returns response metadata along with the stream. + /// + /// /// /// Contains all the parameters required for the OpenStream operation. /// @@ -353,6 +367,133 @@ public partial interface ITransferUtility : IDisposable /// The task object representing the asynchronous operation. Task OpenStreamAsync(TransferUtilityOpenStreamRequest request, CancellationToken cancellationToken = default(CancellationToken)); + /// + /// Returns a stream from which the caller can read the content from the specified + /// Amazon S3 bucket and key, along with response metadata. + /// The caller of this method is responsible for closing the stream. + /// + /// + /// + /// This method uses parallel downloads and intelligent buffering to significantly improve + /// throughput compared to the standard method. + /// + /// + /// How it works: + /// + /// + /// For large objects, the download is automatically split into parts (default 8MB per part) + /// Multiple parts are downloaded concurrently using parallel requests to S3 + /// Downloaded parts are buffered in memory and served to your application as you read from the stream + /// + /// + /// Configuration Options: + /// + /// + /// You can customize the download behavior using : + /// + /// + /// var config = new TransferUtilityConfig + /// { + /// // Control how many parts download in parallel (default: 10) + /// ConcurrentServiceRequests = 20, + /// + /// // Limit memory usage by capping buffered parts (default: 1024) + /// // With 8MB parts, 1024 parts = 8GB max memory + /// MaxInMemoryParts = 512 + /// }; + /// var transferUtility = new TransferUtility(s3Client, config); + /// + /// + /// Use to control parallel download threads. + /// Use to limit memory consumption by capping the number + /// of buffered parts in memory. 
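+ /// 
+ /// For example, with the default 8MB part size, setting MaxInMemoryParts to 512
+ /// caps buffered data at roughly 512 * 8MB = 4GB.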
+ /// + /// + /// Memory Considerations: The buffering mechanism uses memory to store downloaded parts. + /// Adjust if you need to limit memory usage, + /// especially when downloading very large files or multiple files concurrently. + /// + /// + /// + /// The name of the bucket. + /// + /// + /// The object key. + /// + /// + /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. + /// + /// The task object representing the asynchronous operation with response metadata. + Task OpenStreamWithResponseAsync(string bucketName, string key, CancellationToken cancellationToken = default(CancellationToken)); + + /// + /// Returns a stream to read the contents from Amazon S3 as + /// specified by the TransferUtilityOpenStreamRequest, along with response metadata. + /// The caller of this method is responsible for closing the stream. + /// + /// + /// + /// This method uses parallel downloads and intelligent buffering to significantly improve + /// throughput compared to the standard method. + /// + /// + /// How it works: + /// + /// + /// For large objects, the download is automatically split into parts (default 8MB per part) + /// Multiple parts are downloaded concurrently using parallel requests to S3 + /// Downloaded parts are buffered in memory and served to your application as you read from the stream + /// + /// + /// Configuration Options: + /// + /// + /// You can customize the download behavior using : + /// + /// + /// var config = new TransferUtilityConfig + /// { + /// // Control how many parts download in parallel (default: 10) + /// ConcurrentServiceRequests = 20, + /// + /// // Limit memory usage by capping buffered parts (default: 1024) + /// // With 8MB parts, 1024 parts = 8GB max memory + /// MaxInMemoryParts = 512 + /// }; + /// var transferUtility = new TransferUtility(s3Client, config); + /// + /// + /// Use to control parallel download threads. + /// Use to limit memory consumption by capping the number + /// of buffered parts in memory. + /// + /// + /// You can also customize the part size per request using : + /// + /// + /// var request = new TransferUtilityOpenStreamRequest + /// { + /// BucketName = "my-bucket", + /// Key = "my-key", + /// PartSize = 16 * 1024 * 1024 // Use 16MB parts instead of default 8MB + /// }; + /// var response = await transferUtility.OpenStreamWithResponseAsync(request); + /// + /// + /// Memory Considerations: The buffering mechanism uses memory to store downloaded parts. + /// Adjust if you need to limit memory usage, + /// especially when downloading very large files or multiple files concurrently. + /// + /// + /// + /// Contains all the parameters required for the OpenStreamWithResponse operation. + /// + /// + /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. + /// + /// The task object representing the asynchronous operation with response metadata. 
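+ /// 
+ /// A typical end-to-end usage sketch (the bucket name, key, and local file
+ /// path are illustrative placeholders):
+ /// 
+ /// var request = new TransferUtilityOpenStreamRequest
+ /// {
+ ///     BucketName = "amzn-s3-demo-bucket",
+ ///     Key = "my-key"
+ /// };
+ /// using (var response = await transferUtility.OpenStreamWithResponseAsync(request))
+ /// using (var file = File.Create("my-key.bin"))
+ /// {
+ ///     await response.ResponseStream.CopyToAsync(file);
+ /// }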
+ Task OpenStreamWithResponseAsync(TransferUtilityOpenStreamRequest request, CancellationToken cancellationToken = default(CancellationToken)); + #endregion } } diff --git a/sdk/src/Services/S3/Custom/Transfer/_async/TransferUtility.async.cs b/sdk/src/Services/S3/Custom/Transfer/_async/TransferUtility.async.cs index e1c52c2a6e68..79899ce70535 100644 --- a/sdk/src/Services/S3/Custom/Transfer/_async/TransferUtility.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/_async/TransferUtility.async.cs @@ -55,120 +55,21 @@ namespace Amazon.S3.Transfer public partial class TransferUtility : ITransferUtility { #region Upload - /// - /// Uploads the specified file. - /// The object key is derived from the file's name. - /// Multiple threads are used to read the file and perform multiple uploads in parallel. - /// For large uploads, the file will be divided and uploaded in parts using - /// Amazon S3's multipart API. The parts will be reassembled as one object in - /// Amazon S3. - /// - /// - /// - /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. - /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. - /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able - /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, - /// you should manually invoke TransferUtility.AbortMultipartUploadsAsync() to abort the incomplete multipart uploads. - /// - /// - /// For nonseekable streams or streams with an unknown length, TransferUtility will use multipart upload and buffer up to a part size in memory - /// until the final part is reached and complete the upload. The buffer for the multipart upload is controlled by S3Constants.MinPartSize - /// and the default value is 5 megabytes. You can also adjust the read buffer size(i.e.how many bytes to read before writing to the part buffer) - /// via the BufferSize property on the ClientConfig.The default value for this is 8192 bytes. - /// - /// - /// - /// The file path of the file to upload. - /// - /// - /// The target Amazon S3 bucket, that is, the name of the bucket to upload the file to. - /// - /// - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// - /// The task object representing the asynchronous operation. + /// public async Task UploadAsync(string filePath, string bucketName, CancellationToken cancellationToken = default(CancellationToken)) { var request = ConstructUploadRequest(filePath, bucketName); await UploadAsync(request, cancellationToken).ConfigureAwait(false); } - /// - /// Uploads the specified file. - /// Multiple threads are used to read the file and perform multiple uploads in parallel. - /// For large uploads, the file will be divided and uploaded in parts using - /// Amazon S3's multipart API. The parts will be reassembled as one object in - /// Amazon S3. - /// - /// - /// - /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. - /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. - /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able - /// to abort the multipart upload. 
In this case, in order to stop getting charged for the storage of uploaded parts, - /// you should manually invoke TransferUtility.AbortMultipartUploadsAsync() to abort the incomplete multipart uploads. - /// - /// - /// For nonseekable streams or streams with an unknown length, TransferUtility will use multipart upload and buffer up to a part size in memory - /// until the final part is reached and complete the upload. The buffer for the multipart upload is controlled by S3Constants.MinPartSize - /// and the default value is 5 megabytes. You can also adjust the read buffer size(i.e.how many bytes to read before writing to the part buffer) - /// via the BufferSize property on the ClientConfig.The default value for this is 8192 bytes. - /// - /// - /// - /// The file path of the file to upload. - /// - /// - /// The target Amazon S3 bucket, that is, the name of the bucket to upload the file to. - /// - /// - /// The key under which the Amazon S3 object is stored. - /// - /// - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// - /// The task object representing the asynchronous operation. + /// public async Task UploadAsync(string filePath, string bucketName, string key, CancellationToken cancellationToken = default(CancellationToken)) { var request = ConstructUploadRequest(filePath, bucketName,key); await UploadAsync(request, cancellationToken).ConfigureAwait(false); } - /// - /// Uploads the contents of the specified stream. - /// For large uploads, the file will be divided and uploaded in parts using - /// Amazon S3's multipart API. The parts will be reassembled as one object in - /// Amazon S3. - /// - /// - /// - /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. - /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. - /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able - /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, - /// you should manually invoke TransferUtility.AbortMultipartUploadsAsync() to abort the incomplete multipart uploads. - /// - /// - /// For nonseekable streams or streams with an unknown length, TransferUtility will use multipart upload and buffer up to a part size in memory - /// until the final part is reached and complete the upload. The buffer for the multipart upload is controlled by S3Constants.MinPartSize - /// and the default value is 5 megabytes. You can also adjust the read buffer size(i.e.how many bytes to read before writing to the part buffer) - /// via the BufferSize property on the ClientConfig.The default value for this is 8192 bytes. - /// - /// - /// - /// The stream to read to obtain the content to upload. - /// - /// - /// The target Amazon S3 bucket, that is, the name of the bucket to upload the stream to. - /// - /// - /// The key under which the Amazon S3 object is stored. - /// - /// - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// - /// The task object representing the asynchronous operation. 
+ /// public async Task UploadAsync(Stream stream, string bucketName, string key, CancellationToken cancellationToken = default(CancellationToken)) { var request = ConstructUploadRequest(stream, bucketName, key); @@ -177,37 +78,7 @@ public partial class TransferUtility : ITransferUtility - /// - /// Uploads the file or stream specified by the request. - /// To track the progress of the upload, - /// add an event listener to the request's UploadProgressEvent. - /// For large uploads, the file will be divided and uploaded in parts using - /// Amazon S3's multipart API. The parts will be reassembled as one object in - /// Amazon S3. - /// - /// - /// - /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. - /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. - /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able - /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, - /// you should manually invoke TransferUtility.AbortMultipartUploadsAsync() to abort the incomplete multipart uploads. - /// - /// - /// For nonseekable streams or streams with an unknown length, TransferUtility will use multipart upload and buffer up to a part size in memory - /// until the final part is reached and complete the upload. The part size buffer for the multipart upload is controlled by the partSize - /// specified on the TransferUtilityUploadRequest, and if none is specified it defaults to S3Constants.MinPartSize (5 megabytes). - /// You can also adjust the read buffer size (i.e. how many bytes to read before adding it to the - /// part buffer) via the BufferSize property on the ClientConfig. The default value for this is 8192 bytes. - /// - /// - /// - /// Contains all the parameters required to upload to Amazon S3. - /// - /// - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// - /// The task object representing the asynchronous operation. + /// public async Task UploadAsync(TransferUtilityUploadRequest request, CancellationToken cancellationToken = default(CancellationToken)) { using(CreateSpan(nameof(UploadAsync), null, Amazon.Runtime.Telemetry.Tracing.SpanKind.CLIENT)) @@ -252,19 +123,7 @@ public partial class TransferUtility : ITransferUtility #endregion #region AbortMultipartUploads - /// - /// Aborts the multipart uploads that were initiated before the specified date. - /// - /// - /// The name of the bucket containing multipart uploads. - /// - /// - /// The date before which the multipart uploads were initiated. - /// - /// - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// - /// The task object representing the asynchronous operation. + /// public async Task AbortMultipartUploadsAsync(string bucketName, DateTime initiatedDate, CancellationToken cancellationToken = default(CancellationToken)) { using(CreateSpan(nameof(AbortMultipartUploadsAsync), null, Amazon.Runtime.Telemetry.Tracing.SpanKind.CLIENT)) @@ -303,18 +162,7 @@ public partial class TransferUtility : ITransferUtility #region Download - /// - /// Downloads the content from Amazon S3 and writes it to the specified file. - /// If the key is not specified in the request parameter, - /// the file name will used as the key name. - /// - /// - /// Contains all the parameters required to download an Amazon S3 object. 
- /// - /// - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// - /// The task object representing the asynchronous operation. + /// public async Task DownloadAsync(TransferUtilityDownloadRequest request, CancellationToken cancellationToken = default(CancellationToken)) { using(CreateSpan(nameof(DownloadAsync), null, Amazon.Runtime.Telemetry.Tracing.SpanKind.CLIENT)) @@ -328,21 +176,7 @@ public partial class TransferUtility : ITransferUtility #endregion #region OpenStream - /// - /// Returns a stream from which the caller can read the content from the specified - /// Amazon S3 bucket and key. - /// The caller of this method is responsible for closing the stream. - /// - /// - /// The name of the bucket. - /// - /// - /// The object key. - /// - /// - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// - /// The task object representing the asynchronous operation. + /// public async Task OpenStreamAsync(string bucketName, string key, CancellationToken cancellationToken = default(CancellationToken)) { TransferUtilityOpenStreamRequest request = new TransferUtilityOpenStreamRequest() @@ -353,18 +187,7 @@ public partial class TransferUtility : ITransferUtility return await OpenStreamAsync(request, cancellationToken).ConfigureAwait(false); } - /// - /// Returns a stream to read the contents from Amazon S3 as - /// specified by the TransferUtilityOpenStreamRequest. - /// The caller of this method is responsible for closing the stream. - /// - /// - /// Contains all the parameters required for the OpenStream operation. - /// - /// - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// - /// The task object representing the asynchronous operation. 
+ /// public async Task OpenStreamAsync(TransferUtilityOpenStreamRequest request, CancellationToken cancellationToken = default(CancellationToken)) { using(CreateSpan(nameof(OpenStreamAsync), null, Amazon.Runtime.Telemetry.Tracing.SpanKind.CLIENT)) @@ -376,6 +199,28 @@ public partial class TransferUtility : ITransferUtility } } + /// + public async Task OpenStreamWithResponseAsync(string bucketName, string key, CancellationToken cancellationToken = default(CancellationToken)) + { + TransferUtilityOpenStreamRequest request = new TransferUtilityOpenStreamRequest() + { + BucketName = bucketName, + Key = key + }; + return await OpenStreamWithResponseAsync(request, cancellationToken).ConfigureAwait(false); + } + + /// + public async Task OpenStreamWithResponseAsync(TransferUtilityOpenStreamRequest request, CancellationToken cancellationToken = default(CancellationToken)) + { + using(CreateSpan(nameof(OpenStreamWithResponseAsync), null, Amazon.Runtime.Telemetry.Tracing.SpanKind.CLIENT)) + { + CheckForBlockedArn(request.BucketName, "OpenStreamWithResponse"); + OpenStreamWithResponseCommand command = new OpenStreamWithResponseCommand(this._s3Client, request, this._config); + return await command.ExecuteAsync(cancellationToken).ConfigureAwait(continueOnCapturedContext: false); + } + } + #endregion internal BaseCommand GetUploadCommand(TransferUtilityUploadRequest request, SemaphoreSlim asyncThrottler) diff --git a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.sync.cs b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.sync.cs index 8444104739e2..5a8a360ea00d 100644 --- a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.sync.cs +++ b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.sync.cs @@ -224,6 +224,13 @@ public partial interface ITransferUtility /// Amazon S3 bucket and key. /// The caller of this method is responsible for closing the stream. ///
+ /// + /// + /// Note: Consider using + /// instead. The newer operation uses parallel downloads from S3 and memory buffering to improve performance, + /// and also returns response metadata along with the stream. + /// + /// /// /// The name of the bucket. /// @@ -240,6 +247,13 @@ public partial interface ITransferUtility /// specified by the TransferUtilityOpenStreamRequest. /// The caller of this method is responsible for closing the stream. ///
+ /// + /// + /// Note: Consider using + /// instead. The newer operation uses parallel downloads from S3 and memory buffering to improve performance, + /// and also returns response metadata along with the stream. + /// + /// /// /// Contains all the parameters required to open a stream to an S3 object. /// @@ -248,6 +262,131 @@ public partial interface ITransferUtility /// Stream OpenStream(TransferUtilityOpenStreamRequest request); + /// + /// Returns a stream from which the caller can read the content from the specified + /// Amazon S3 bucket and key, along with response metadata. + /// The caller of this method is responsible for closing the stream. + /// + /// + /// + /// This method uses parallel downloads and intelligent buffering to significantly improve + /// throughput compared to the standard method. + /// + /// + /// How it works: + /// + /// + /// For large objects, the download is automatically split into parts (default 8MB per part) + /// Multiple parts are downloaded concurrently using parallel requests to S3 + /// Downloaded parts are buffered in memory and served to your application as you read from the stream + /// + /// + /// Configuration Options: + /// + /// + /// You can customize the download behavior using : + /// + /// + /// var config = new TransferUtilityConfig + /// { + /// // Control how many parts download in parallel (default: 10) + /// ConcurrentServiceRequests = 20, + /// + /// // Limit memory usage by capping buffered parts (default: 1024) + /// // With 8MB parts, 1024 parts = 8GB max memory + /// MaxInMemoryParts = 512 + /// }; + /// var transferUtility = new TransferUtility(s3Client, config); + /// + /// + /// Use to control parallel download threads. + /// Use to limit memory consumption by capping the number + /// of buffered parts in memory. + /// + /// + /// Memory Considerations: The buffering mechanism uses memory to store downloaded parts. + /// Adjust if you need to limit memory usage, + /// especially when downloading very large files or multiple files concurrently. + /// + /// + /// + /// The name of the bucket. + /// + /// + /// The object key. + /// + /// + /// A response containing the stream and metadata from the specified Amazon S3 bucket and key. + /// + TransferUtilityOpenStreamResponse OpenStreamWithResponse(string bucketName, string key); + + /// + /// Returns a stream to read the contents from Amazon S3 as + /// specified by the TransferUtilityOpenStreamRequest, along with response metadata. + /// The caller of this method is responsible for closing the stream. + /// + /// + /// + /// This method uses parallel downloads and intelligent buffering to significantly improve + /// throughput compared to the standard method. 
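+ /// 
+ /// A minimal synchronous usage sketch (the request is assumed to be an
+ /// already-configured TransferUtilityOpenStreamRequest):
+ /// 
+ /// using (var response = transferUtility.OpenStreamWithResponse(request))
+ /// {
+ ///     // response.ResponseStream streams the object; metadata such as
+ ///     // response.ETag and response.Headers.ContentLength is also available
+ /// }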
+ /// + /// + /// How it works: + /// + /// + /// For large objects, the download is automatically split into parts (default 8MB per part) + /// Multiple parts are downloaded concurrently using parallel requests to S3 + /// Downloaded parts are buffered in memory and served to your application as you read from the stream + /// + /// + /// Configuration Options: + /// + /// + /// You can customize the download behavior using : + /// + /// + /// var config = new TransferUtilityConfig + /// { + /// // Control how many parts download in parallel (default: 10) + /// ConcurrentServiceRequests = 20, + /// + /// // Limit memory usage by capping buffered parts (default: 1024) + /// // With 8MB parts, 1024 parts = 8GB max memory + /// MaxInMemoryParts = 512 + /// }; + /// var transferUtility = new TransferUtility(s3Client, config); + /// + /// + /// Use to control parallel download threads. + /// Use to limit memory consumption by capping the number + /// of buffered parts in memory. + /// + /// + /// You can also customize the part size per request using : + /// + /// + /// var request = new TransferUtilityOpenStreamRequest + /// { + /// BucketName = "my-bucket", + /// Key = "my-key", + /// PartSize = 16 * 1024 * 1024 // Use 16MB parts instead of default 8MB + /// }; + /// var response = transferUtility.OpenStreamWithResponse(request); + /// + /// + /// Memory Considerations: The buffering mechanism uses memory to store downloaded parts. + /// Adjust if you need to limit memory usage, + /// especially when downloading very large files or multiple files concurrently. + /// + /// + /// + /// Contains all the parameters required for the OpenStreamWithResponse operation. + /// + /// + /// A response containing the stream and metadata from Amazon S3. + /// + TransferUtilityOpenStreamResponse OpenStreamWithResponse(TransferUtilityOpenStreamRequest request); + #endregion #region Download diff --git a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.sync.cs b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.sync.cs index 9a627d30c282..c37d98c2ed97 100644 --- a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.sync.cs +++ b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.sync.cs @@ -31,29 +31,7 @@ namespace Amazon.S3.Transfer public partial class TransferUtility : ITransferUtility { #region UploadDirectory - /// - /// Uploads files from a specified directory. - /// The object key is derived from the file names - /// inside the directory. - /// For large uploads, the file will be divided and uploaded in parts using - /// Amazon S3's multipart API. The parts will be reassembled as one object in - /// Amazon S3. - /// - /// - /// - /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. - /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. - /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able - /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, - /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. - /// - /// - /// - /// The source directory, that is, the directory containing the files to upload. - /// - /// - /// The target Amazon S3 bucket, that is, the name of the bucket to upload the files to. 
- /// + /// public void UploadDirectory(string directory, string bucketName) { try @@ -67,36 +45,7 @@ public void UploadDirectory(string directory, string bucketName) } - /// - /// Uploads files from a specified directory. - /// The object key is derived from the file names - /// inside the directory. - /// For large uploads, the file will be divided and uploaded in parts using - /// Amazon S3's multipart API. The parts will be reassembled as one object in - /// Amazon S3. - /// - /// - /// - /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. - /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. - /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able - /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, - /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. - /// - /// - /// - /// The source directory, that is, the directory containing the files to upload. - /// - /// - /// The target Amazon S3 bucket, that is, the name of the bucket to upload the files to. - /// - /// - /// A pattern used to identify the files from the source directory to upload. - /// - /// - /// A search option that specifies whether to recursively search for files to upload - /// in subdirectories. - /// + /// public void UploadDirectory(string directory, string bucketName, string searchPattern, SearchOption searchOption) { try @@ -109,26 +58,7 @@ public void UploadDirectory(string directory, string bucketName, string searchPa } } - /// - /// Uploads files from a specified directory. - /// The object key is derived from the file names - /// inside the directory. - /// For large uploads, the file will be divided and uploaded in parts using - /// Amazon S3's multipart API. The parts will be reassembled as one object in - /// Amazon S3. - /// - /// - /// - /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. - /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. - /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able - /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, - /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. - /// - /// - /// - /// The request that contains all the parameters required to upload a directory. - /// + /// public void UploadDirectory(TransferUtilityUploadDirectoryRequest request) { try @@ -144,29 +74,7 @@ public void UploadDirectory(TransferUtilityUploadDirectoryRequest request) #region Upload - /// - /// Uploads the specified file. - /// The object key is derived from the file's name. - /// Multiple threads are used to read the file and perform multiple uploads in parallel. - /// For large uploads, the file will be divided and uploaded in parts using - /// Amazon S3's multipart API. The parts will be reassembled as one object in - /// Amazon S3. - /// - /// - /// - /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. - /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. 
- /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able - /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, - /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. - /// - /// - /// - /// The file path of the file to upload. - /// - /// - /// The target Amazon S3 bucket, that is, the name of the bucket to upload the file to. - /// + /// public void Upload(string filePath, string bucketName) { try @@ -179,31 +87,7 @@ public void Upload(string filePath, string bucketName) } } - /// - /// Uploads the specified file. - /// Multiple threads are used to read the file and perform multiple uploads in parallel. - /// For large uploads, the file will be divided and uploaded in parts using - /// Amazon S3's multipart API. The parts will be reassembled as one object in - /// Amazon S3. - /// - /// - /// - /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. - /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. - /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able - /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, - /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. - /// - /// - /// - /// The file path of the file to upload. - /// - /// - /// The target Amazon S3 bucket, that is, the name of the bucket to upload the file to. - /// - /// - /// The key under which the Amazon S3 object is stored. - /// + /// public void Upload(string filePath, string bucketName, string key) { try @@ -217,30 +101,7 @@ public void Upload(string filePath, string bucketName, string key) } - /// - /// Uploads the contents of the specified stream. - /// For large uploads, the file will be divided and uploaded in parts using - /// Amazon S3's multipart API. The parts will be reassembled as one object in - /// Amazon S3. - /// - /// - /// - /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. - /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. - /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able - /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, - /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. - /// - /// - /// - /// The stream to read to obtain the content to upload. - /// - /// - /// The target Amazon S3 bucket, that is, the name of the bucket to upload the stream to. - /// - /// - /// The key under which the Amazon S3 object is stored. - /// + /// public void Upload(Stream stream, string bucketName, string key) { try @@ -253,26 +114,7 @@ public void Upload(Stream stream, string bucketName, string key) } } - /// - /// Uploads the file or stream specified by the request. - /// To track the progress of the upload, - /// add an event listener to the request's UploadProgressEvent. - /// For large uploads, the file will be divided and uploaded in parts using - /// Amazon S3's multipart API. The parts will be reassembled as one object in - /// Amazon S3. 
- /// - /// - /// - /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. - /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. - /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able - /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, - /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. - /// - /// - /// - /// Contains all the parameters required to upload to Amazon S3. - /// + /// public void Upload(TransferUtilityUploadRequest request) { try @@ -345,20 +187,7 @@ public TransferUtilityUploadResponse UploadWithResponse(TransferUtilityUploadReq #region OpenStream - /// - /// Returns a stream from which the caller can read the content from the specified - /// Amazon S3 bucket and key. - /// The caller of this method is responsible for closing the stream. - /// - /// - /// The name of the bucket. - /// - /// - /// The object key. - /// - /// - /// A stream of the contents from the specified Amazon S3 and key. - /// + /// public Stream OpenStream(string bucketName, string key) { try @@ -372,17 +201,7 @@ public Stream OpenStream(string bucketName, string key) } } - /// - /// Returns a stream to read the contents from Amazon S3 as - /// specified by the TransferUtilityOpenStreamRequest. - /// The caller of this method is responsible for closing the stream. - /// - /// - /// Contains all the parameters required to open a stream to an S3 object. - /// - /// - /// A stream of the contents from Amazon S3. - /// + /// public Stream OpenStream(TransferUtilityOpenStreamRequest request) { try @@ -396,21 +215,38 @@ public Stream OpenStream(TransferUtilityOpenStreamRequest request) } } + /// + public TransferUtilityOpenStreamResponse OpenStreamWithResponse(string bucketName, string key) + { + try + { + return OpenStreamWithResponseAsync(bucketName, key).Result; + } + catch (AggregateException e) + { + ExceptionDispatchInfo.Capture(e.InnerException).Throw(); + return null; + } + } + + /// + public TransferUtilityOpenStreamResponse OpenStreamWithResponse(TransferUtilityOpenStreamRequest request) + { + try + { + return OpenStreamWithResponseAsync(request).Result; + } + catch (AggregateException e) + { + ExceptionDispatchInfo.Capture(e.InnerException).Throw(); + return null; + } + } + #endregion #region Download - /// - /// Downloads the content from Amazon S3 and writes it to the specified file. - /// - /// - /// The file path where the content from Amazon S3 will be written to. - /// - /// - /// The name of the bucket containing the Amazon S3 object to download. - /// - /// - /// The key under which the Amazon S3 object is stored. - /// + /// public void Download(string filePath, string bucketName, string key) { try @@ -423,14 +259,7 @@ public void Download(string filePath, string bucketName, string key) } } - /// - /// Downloads the content from Amazon S3 and writes it to the specified file. - /// If the key is not specified in the request parameter, - /// the file name will used as the key name. - /// - /// - /// Contains all the parameters required to download an Amazon S3 object. 
- /// + /// public void Download(TransferUtilityDownloadRequest request) { try @@ -445,19 +274,7 @@ public void Download(TransferUtilityDownloadRequest request) #endregion #region DownloadDirectory - /// - /// Downloads the objects in Amazon S3 that have a key that starts with the value - /// specified by s3Directory. - /// - /// - /// The name of the bucket containing the Amazon S3 objects to download. - /// - /// - /// The directory in Amazon S3 to download. - /// - /// - /// The local directory to download the objects to. - /// + /// public void DownloadDirectory(string bucketName, string s3Directory, string localDirectory) { try @@ -470,15 +287,7 @@ public void DownloadDirectory(string bucketName, string s3Directory, string loca } } - /// - /// Downloads the objects in Amazon S3 that have a key that starts with the value - /// specified by the S3Directory - /// property of the passed in TransferUtilityDownloadDirectoryRequest object. - /// - /// - /// Contains all the parameters required to download objects from Amazon S3 - /// into a local directory. - /// + /// public void DownloadDirectory(TransferUtilityDownloadDirectoryRequest request) { try @@ -494,15 +303,7 @@ public void DownloadDirectory(TransferUtilityDownloadDirectoryRequest request) #region AbortMultipartUploads - /// - /// Aborts the multipart uploads that were initiated before the specified date. - /// - /// - /// The name of the bucket containing multipart uploads. - /// - /// - /// The date before which the multipart uploads were initiated. - /// + /// public void AbortMultipartUploads(string bucketName, DateTime initiatedDate) { try @@ -515,12 +316,7 @@ public void AbortMultipartUploads(string bucketName, DateTime initiatedDate) } } - /// - /// Aborts the multipart uploads based on the specified request parameters. - /// - /// - /// Contains all the parameters required to abort multipart uploads. 
- /// + /// public void AbortMultipartUploads(TransferUtilityAbortMultipartUploadRequest request) { try diff --git a/sdk/src/Services/S3/Properties/AssemblyInfo.cs b/sdk/src/Services/S3/Properties/AssemblyInfo.cs index 398e0d2d2759..6a68a7b58fff 100644 --- a/sdk/src/Services/S3/Properties/AssemblyInfo.cs +++ b/sdk/src/Services/S3/Properties/AssemblyInfo.cs @@ -21,6 +21,7 @@ [assembly: InternalsVisibleTo("AWSSDK.UnitTests.S3.NetFramework, PublicKey=0024000004800000940000000602000000240000525341310004000001000100db5f59f098d27276c7833875a6263a3cc74ab17ba9a9df0b52aedbe7252745db7274d5271fd79c1f08f668ecfa8eaab5626fa76adc811d3c8fc55859b0d09d3bc0a84eecd0ba891f2b8a2fc55141cdcc37c2053d53491e650a479967c3622762977900eddbf1252ed08a2413f00a28f3a0752a81203f03ccb7f684db373518b4")] [assembly: InternalsVisibleTo("AWSSDK.UnitTests.NetFramework, PublicKey=0024000004800000940000000602000000240000525341310004000001000100db5f59f098d27276c7833875a6263a3cc74ab17ba9a9df0b52aedbe7252745db7274d5271fd79c1f08f668ecfa8eaab5626fa76adc811d3c8fc55859b0d09d3bc0a84eecd0ba891f2b8a2fc55141cdcc37c2053d53491e650a479967c3622762977900eddbf1252ed08a2413f00a28f3a0752a81203f03ccb7f684db373518b4")] +[assembly: InternalsVisibleTo("DynamicProxyGenAssembly2, PublicKey=0024000004800000940000000602000000240000525341310004000001000100c547cac37abd99c8db225ef2f6c8a3602f3b3606cc9891605d02baa56104f4cfc0734aa39b93bf7852f7d9266654753cc297e7d2edfe0bac1cdcf9f717241550e0a7b191195b7667bb4f64bcb8e2121380fd1d9d46ad2d92d2d15605093924cceaf74c4861eff62abf69b9291ed0a340e113be11e6a7d3113e92484cf7045cc7")] [assembly: AssemblyConfiguration("")] [assembly: AssemblyProduct("Amazon Web Services SDK for .NET")] [assembly: AssemblyCompany("Amazon.com, Inc")] diff --git a/sdk/test/Services/S3/IntegrationTests/TransferUtilityOpenStreamTests.cs b/sdk/test/Services/S3/IntegrationTests/TransferUtilityOpenStreamTests.cs new file mode 100644 index 000000000000..b78f19ecacb3 --- /dev/null +++ b/sdk/test/Services/S3/IntegrationTests/TransferUtilityOpenStreamTests.cs @@ -0,0 +1,474 @@ +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Threading.Tasks; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Amazon.S3; +using Amazon.S3.Model; +using Amazon.S3.Transfer; +using Amazon.S3.Util; +using Amazon.Util; +using AWSSDK_DotNet.IntegrationTests.Utils; + +namespace AWSSDK_DotNet.IntegrationTests.Tests.S3 +{ + /// + /// Integration tests for TransferUtility.OpenStreamWithResponse functionality. + /// These tests verify end-to-end functionality with actual S3 operations. + /// + /// Most test scenarios (buffer sizes, part boundaries, stream behavior) are covered + /// in BufferedMultipartStreamTests.cs with mocked dependencies for faster execution. 
+ /// + /// These integration tests focus on: + /// - Basic single-part downloads + /// - Basic multipart downloads + /// - Real S3 metadata preservation + /// + [TestClass] + public class TransferUtilityOpenStreamTests : TestBase + { + private static readonly long MB = 1024 * 1024; + private static string bucketName; + + [ClassInitialize()] + public static void ClassInitialize(TestContext testContext) + { + bucketName = S3TestUtils.CreateBucketWithWait(Client); + } + + [ClassCleanup] + public static void ClassCleanup() + { + AmazonS3Util.DeleteS3BucketWithObjects(Client, bucketName); + BaseClean(); + } + + #region Single-Part Tests + + [TestMethod] + [TestCategory("S3")] + [TestCategory("OpenStream")] + public async Task OpenStream_SinglePart_SmallObject() + { + // Arrange + var objectSize = 2 * MB; + var (key, expectedChecksum) = await CreateTestObjectWithChecksum(objectSize); + + // Act + var transferUtility = new TransferUtility(Client); + using (var response = await transferUtility.OpenStreamWithResponseAsync(bucketName, key)) + { + // Assert + Assert.IsNotNull(response); + Assert.IsNotNull(response.ResponseStream); + ValidateHeaders(response, objectSize); + + var downloadedBytes = await ReadStreamToByteArray(response.ResponseStream, objectSize, (int)(1 * MB)); + var actualChecksum = CalculateChecksum(downloadedBytes); + + Assert.AreEqual(expectedChecksum, actualChecksum, "Downloaded data checksum should match"); + Assert.AreEqual(objectSize, downloadedBytes.Length, "Downloaded size should match"); + } + } + + [TestMethod] + [TestCategory("S3")] + [TestCategory("OpenStream")] + public async Task OpenStream_SinglePart_EmptyObject() + { + // Arrange + var key = UtilityMethods.GenerateName("empty-object"); + await Client.PutObjectAsync(new PutObjectRequest + { + BucketName = bucketName, + Key = key, + ContentBody = "" + }); + + // Act + var transferUtility = new TransferUtility(Client); + using (var response = await transferUtility.OpenStreamWithResponseAsync(bucketName, key)) + { + // Assert + Assert.IsNotNull(response); + Assert.IsNotNull(response.ResponseStream); + Assert.AreEqual(0, response.Headers.ContentLength); + + var buffer = new byte[1024]; + var bytesRead = await response.ResponseStream.ReadAsync(buffer, 0, buffer.Length); + Assert.AreEqual(0, bytesRead, "Should read 0 bytes from empty object"); + } + } + + #endregion + + #region Multipart Test + + [TestMethod] + [TestCategory("S3")] + [TestCategory("OpenStream")] + [TestCategory("Multipart")] + public async Task OpenStream_Multipart_BasicDownload() + { + // Arrange - Simple multipart download to verify end-to-end S3 integration + var objectSize = 20 * MB; + var partSize = 8 * MB; + var key = UtilityMethods.GenerateName("openstream-test"); + var filePath = Path.Combine(Path.GetTempPath(), key); + UtilityMethods.GenerateFile(filePath, objectSize); + + // Calculate checksum before upload + var expectedChecksum = CalculateFileChecksum(filePath); + + // Upload using TransferUtility to ensure multipart upload + var uploadRequest = new TransferUtilityUploadRequest + { + BucketName = bucketName, + Key = key, + FilePath = filePath, + PartSize = partSize // Force multipart upload with explicit part size + }; + + var transferUtility = new TransferUtility(Client); + await transferUtility.UploadAsync(uploadRequest); + + // Verify object is multipart by checking PartsCount + // Note: PartsCount is only returned when PartNumber is specified in the request + var metadata = await Client.GetObjectMetadataAsync(new GetObjectMetadataRequest + { 
+ BucketName = bucketName, + Key = key, + PartNumber = 1 + }); + Assert.IsTrue(metadata.PartsCount > 1, "Object should be multipart to test multipart download"); + + var request = new TransferUtilityOpenStreamRequest + { + BucketName = bucketName, + Key = key, + PartSize = partSize + }; + + // Act + using (var response = await transferUtility.OpenStreamWithResponseAsync(request)) + { + // Assert + Assert.IsNotNull(response, "Response should not be null"); + Assert.IsNotNull(response.ResponseStream, "ResponseStream should not be null"); + ValidateHeaders(response, objectSize); + + var downloadedBytes = await ReadStreamToByteArray(response.ResponseStream, objectSize, (int)(2 * MB)); + var actualChecksum = CalculateChecksum(downloadedBytes); + + Assert.AreEqual(expectedChecksum, actualChecksum, "Downloaded data checksum should match"); + Assert.AreEqual(objectSize, downloadedBytes.Length, "Downloaded size should match"); + } + } + + [TestMethod] + [TestCategory("S3")] + [TestCategory("OpenStream")] + [TestCategory("Multipart")] + public async Task OpenStream_Multipart_RangeDownload() + { + // Arrange - Test RANGE-based multipart download with custom part size + var objectSize = 20 * MB; + var uploadPartSize = 8 * MB; // Upload with 8MB parts + var downloadPartSize = 6 * MB; // Download with different 6MB parts to test RANGE strategy + var key = UtilityMethods.GenerateName("openstream-range-test"); + var filePath = Path.Combine(Path.GetTempPath(), key); + UtilityMethods.GenerateFile(filePath, objectSize); + + // Calculate checksum before upload + var expectedChecksum = CalculateFileChecksum(filePath); + + // Upload using TransferUtility to ensure multipart upload + var uploadRequest = new TransferUtilityUploadRequest + { + BucketName = bucketName, + Key = key, + FilePath = filePath, + PartSize = uploadPartSize // Force multipart upload + }; + + var transferUtility = new TransferUtility(Client); + await transferUtility.UploadAsync(uploadRequest); + + // Verify object is multipart + var metadata = await Client.GetObjectMetadataAsync(new GetObjectMetadataRequest + { + BucketName = bucketName, + Key = key, + PartNumber = 1 + }); + Assert.IsTrue(metadata.PartsCount > 1, "Object should be multipart to test multipart download"); + + // Act - Download using RANGE strategy with different part size + var request = new TransferUtilityOpenStreamRequest + { + BucketName = bucketName, + Key = key, + MultipartDownloadType = MultipartDownloadType.RANGE, + PartSize = downloadPartSize // Use different part size than upload + }; + + using (var response = await transferUtility.OpenStreamWithResponseAsync(request)) + { + // Assert + Assert.IsNotNull(response, "Response should not be null"); + Assert.IsNotNull(response.ResponseStream, "ResponseStream should not be null"); + ValidateHeaders(response, objectSize); + + var downloadedBytes = await ReadStreamToByteArray(response.ResponseStream, objectSize, (int)(2 * MB)); + var actualChecksum = CalculateChecksum(downloadedBytes); + + Assert.AreEqual(expectedChecksum, actualChecksum, + "Downloaded data checksum should match (RANGE strategy)"); + Assert.AreEqual(objectSize, downloadedBytes.Length, + "Downloaded size should match (RANGE strategy)"); + } + } + + #endregion + + #region Checksum Tests + + [TestMethod] + [TestCategory("S3")] + [TestCategory("OpenStream")] + [TestCategory("Checksum")] + public async Task OpenStream_MultipartObjectWithChecksums_NullsCompositeChecksums() + { + // Arrange - Upload a multipart object with checksums + // Object must be > 16MB to 
trigger multipart upload with checksums + var objectSize = 20 * MB; + var key = UtilityMethods.GenerateName("composite-checksum-test"); + var filePath = Path.Combine(Path.GetTempPath(), key); + UtilityMethods.GenerateFile(filePath, objectSize); + + // Upload with checksum algorithm to create composite checksum + var uploadRequest = new TransferUtilityUploadRequest + { + BucketName = bucketName, + Key = key, + FilePath = filePath, + ChecksumAlgorithm = ChecksumAlgorithm.CRC32, + PartSize = 8 * MB + }; + + var uploadUtility = new TransferUtility(Client); + await uploadUtility.UploadAsync(uploadRequest); + + // Verify object is multipart by checking PartsCount + // Note: PartsCount is only returned when PartNumber is specified in the request + var metadata = await Client.GetObjectMetadataAsync(new GetObjectMetadataRequest + { + BucketName = bucketName, + Key = key, + PartNumber = 1 + }); + Assert.IsTrue(metadata.PartsCount > 1, "Object should be multipart to test composite checksums"); + + // Act - Download with ChecksumMode enabled + var openStreamRequest = new TransferUtilityOpenStreamRequest + { + BucketName = bucketName, + Key = key, + ChecksumMode = ChecksumMode.ENABLED + }; + + using (var response = await uploadUtility.OpenStreamWithResponseAsync(openStreamRequest)) + { + // Assert - Verify ChecksumType is COMPOSITE + Assert.AreEqual(ChecksumType.COMPOSITE, response.ChecksumType, + "ChecksumType should be COMPOSITE for multipart objects"); + + // Per spec: "If ChecksumType is COMPOSITE, set all checksum value members to null + // as the checksum value returned from a part GET request is not the composite + // checksum for the entire object" + Assert.IsNull(response.ChecksumCRC32, "ChecksumCRC32 should be null for composite checksums"); + Assert.IsNull(response.ChecksumCRC32C, "ChecksumCRC32C should be null for composite checksums"); + Assert.IsNull(response.ChecksumCRC64NVME, "ChecksumCRC64NVME should be null for composite checksums"); + Assert.IsNull(response.ChecksumSHA1, "ChecksumSHA1 should be null for composite checksums"); + Assert.IsNull(response.ChecksumSHA256, "ChecksumSHA256 should be null for composite checksums"); + + // Verify other response properties are still populated correctly + Assert.IsNotNull(response.ETag, "ETag should still be populated"); + Assert.IsTrue(response.Headers.ContentLength > 0, "ContentLength should be populated"); + Assert.IsNotNull(response.ResponseStream, "ResponseStream should be available"); + + // Verify we can still read the stream + var buffer = new byte[1024]; + var bytesRead = await response.ResponseStream.ReadAsync(buffer, 0, buffer.Length); + Assert.IsTrue(bytesRead > 0, "Should be able to read from stream despite null checksums"); + } + } + + #endregion + + #region Metadata Validation Tests + + [TestMethod] + [TestCategory("S3")] + [TestCategory("OpenStream")] + [TestCategory("Metadata")] + public async Task OpenStream_PreservesMetadata() + { + // Arrange + var objectSize = 10 * MB; + var key = UtilityMethods.GenerateName("metadata-test"); + var filePath = Path.Combine(Path.GetTempPath(), key); + UtilityMethods.GenerateFile(filePath, objectSize); + + var putRequest = new PutObjectRequest + { + BucketName = bucketName, + Key = key, + FilePath = filePath, + ContentType = "application/octet-stream" + }; + putRequest.Metadata.Add("test-key", "test-value"); + putRequest.Metadata.Add("custom-header", "custom-value"); + + await Client.PutObjectAsync(putRequest); + + // Act + var transferUtility = new TransferUtility(Client); + using (var 
response = await transferUtility.OpenStreamWithResponseAsync(bucketName, key)) + { + // Assert + Assert.IsNotNull(response); + Assert.AreEqual("application/octet-stream", response.Headers.ContentType); + + // S3 automatically prefixes user-defined metadata with "x-amz-meta-" + Assert.IsTrue(response.Metadata.Keys.Contains("x-amz-meta-test-key"), + "Metadata should contain 'x-amz-meta-test-key'"); + Assert.AreEqual("test-value", response.Metadata["x-amz-meta-test-key"]); + + Assert.IsTrue(response.Metadata.Keys.Contains("x-amz-meta-custom-header"), + "Metadata should contain 'x-amz-meta-custom-header'"); + Assert.AreEqual("custom-value", response.Metadata["x-amz-meta-custom-header"]); + } + } + + [TestMethod] + [TestCategory("S3")] + [TestCategory("OpenStream")] + [TestCategory("Metadata")] + public async Task OpenStream_PreservesETag() + { + // Arrange + var objectSize = 15 * MB; + var key = UtilityMethods.GenerateName("etag-test"); + var filePath = Path.Combine(Path.GetTempPath(), key); + UtilityMethods.GenerateFile(filePath, objectSize); + + await Client.PutObjectAsync(new PutObjectRequest + { + BucketName = bucketName, + Key = key, + FilePath = filePath + }); + + var metadata = await Client.GetObjectMetadataAsync(new GetObjectMetadataRequest + { + BucketName = bucketName, + Key = key + }); + var expectedETag = metadata.ETag; + + // Act + var transferUtility = new TransferUtility(Client); + using (var response = await transferUtility.OpenStreamWithResponseAsync(bucketName, key)) + { + // Assert + Assert.IsNotNull(response.ETag); + Assert.AreEqual(expectedETag, response.ETag); + } + } + + #endregion + + #region Helper Methods + + /// + /// Creates a test object in S3 with the specified size and returns its key and checksum. + /// + private static async Task<(string key, string checksum)> CreateTestObjectWithChecksum(long objectSize) + { + var key = UtilityMethods.GenerateName("openstream-test"); + var filePath = Path.Combine(Path.GetTempPath(), key); + UtilityMethods.GenerateFile(filePath, objectSize); + + // Calculate checksum before upload + var checksum = CalculateFileChecksum(filePath); + + await Client.PutObjectAsync(new PutObjectRequest + { + BucketName = bucketName, + Key = key, + FilePath = filePath + }); + + return (key, checksum); + } + + /// + /// Calculates the MD5 checksum of a file. + /// + private static string CalculateFileChecksum(string filePath) + { + using (var md5 = System.Security.Cryptography.MD5.Create()) + using (var stream = File.OpenRead(filePath)) + { + var hash = md5.ComputeHash(stream); + return Convert.ToBase64String(hash); + } + } + + /// + /// Validates that the response headers contain expected values. + /// + private static void ValidateHeaders(TransferUtilityOpenStreamResponse response, long expectedSize) + { + Assert.IsNotNull(response.Headers, "Headers should not be null"); + Assert.AreEqual(expectedSize, response.Headers.ContentLength, "Content length should match"); + Assert.IsNotNull(response.ETag, "ETag should not be null"); + } + + /// + /// Reads a stream completely into a byte array using the specified buffer size. 
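+        /// A read loop is required because Stream.ReadAsync may return fewer bytes
+        /// than requested, so a single call cannot be assumed to fill the buffer.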
+        ///
+        private static async Task<byte[]> ReadStreamToByteArray(Stream stream, long totalSize, int bufferSize)
+        {
+            var result = new byte[totalSize];
+            var buffer = new byte[bufferSize];
+            long totalRead = 0;
+
+            int bytesRead;
+            while ((bytesRead = await stream.ReadAsync(buffer, 0, buffer.Length)) > 0)
+            {
+                Array.Copy(buffer, 0, result, totalRead, bytesRead);
+                totalRead += bytesRead;
+            }
+
+            Assert.AreEqual(totalSize, totalRead, "Should read expected number of bytes");
+            return result;
+        }
+
+        ///
+        /// Calculates the MD5 checksum of a byte array.
+        ///
+        private static string CalculateChecksum(byte[] data)
+        {
+            using (var md5 = System.Security.Cryptography.MD5.Create())
+            {
+                var hash = md5.ComputeHash(data);
+                return Convert.ToBase64String(hash);
+            }
+        }
+
+        #endregion
+    }
+}
diff --git a/sdk/test/Services/S3/UnitTests/Custom/BufferedDataSourceTests.cs b/sdk/test/Services/S3/UnitTests/Custom/BufferedDataSourceTests.cs
new file mode 100644
index 000000000000..24b25b97ea93
--- /dev/null
+++ b/sdk/test/Services/S3/UnitTests/Custom/BufferedDataSourceTests.cs
@@ -0,0 +1,487 @@
+using Amazon.S3.Transfer.Internal;
+using Microsoft.VisualStudio.TestTools.UnitTesting;
+using System;
+using System.Buffers;
+using System.Threading;
+using System.Threading.Tasks;
+
+namespace AWSSDK.UnitTests
+{
+    ///
+    /// Unit tests for BufferedDataSource class.
+    /// Tests reading from pre-buffered StreamPartBuffer data.
+    ///
+    [TestClass]
+    public class BufferedDataSourceTests
+    {
+        #region Constructor Tests
+
+        [TestMethod]
+        public void Constructor_WithValidPartBuffer_CreatesDataSource()
+        {
+            // Arrange
+            byte[] testBuffer = ArrayPool<byte>.Shared.Rent(1024);
+            var partBuffer = new StreamPartBuffer(1, testBuffer, 512);
+
+            // Act
+            var dataSource = new BufferedDataSource(partBuffer);
+
+            // Assert
+            Assert.IsNotNull(dataSource);
+            Assert.AreEqual(1, dataSource.PartNumber);
+            Assert.IsFalse(dataSource.IsComplete);
+
+            // Cleanup
+            dataSource.Dispose();
+        }
+
+        [TestMethod]
+        [ExpectedException(typeof(ArgumentNullException))]
+        public void Constructor_WithNullPartBuffer_ThrowsArgumentNullException()
+        {
+            // Act
+            var dataSource = new BufferedDataSource(null);
+
+            // Assert - ExpectedException
+        }
+
+        #endregion
+
+        #region Property Tests
+
+        [TestMethod]
+        public void PartNumber_ReturnsPartBufferPartNumber()
+        {
+            // Arrange
+            byte[] testBuffer = ArrayPool<byte>.Shared.Rent(1024);
+            var partBuffer = new StreamPartBuffer(5, testBuffer, 512);
+            var dataSource = new BufferedDataSource(partBuffer);
+
+            try
+            {
+                // Act & Assert
+                Assert.AreEqual(5, dataSource.PartNumber);
+            }
+            finally
+            {
+                dataSource.Dispose();
+            }
+        }
+
+        [TestMethod]
+        public void IsComplete_WhenNoRemainingBytes_ReturnsTrue()
+        {
+            // Arrange
+            byte[] testBuffer = ArrayPool<byte>.Shared.Rent(1024);
+            var partBuffer = new StreamPartBuffer(1, testBuffer, 512);
+            partBuffer.CurrentPosition = 512; // Move to end
+            var dataSource = new BufferedDataSource(partBuffer);
+
+            try
+            {
+                // Act & Assert
+                Assert.IsTrue(dataSource.IsComplete);
+            }
+            finally
+            {
+                dataSource.Dispose();
+            }
+        }
+
+        [TestMethod]
+        public void IsComplete_WhenRemainingBytes_ReturnsFalse()
+        {
+            // Arrange
+            byte[] testBuffer = ArrayPool<byte>.Shared.Rent(1024);
+            var partBuffer = new StreamPartBuffer(1, testBuffer, 512);
+            var dataSource = new BufferedDataSource(partBuffer);
+
+            try
+            {
+                // Act & Assert
+                Assert.IsFalse(dataSource.IsComplete);
+            }
+            finally
+            {
+                dataSource.Dispose();
+            }
+        }
+
+        #endregion
+
+        #region ReadAsync Tests - Happy Path
+
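+        // Common fixture pattern for the tests below (illustrative sketch, using only
+        // helpers that appear in this file): rent a pooled array, copy deterministic
+        // test data into it, wrap it in a StreamPartBuffer, and read it back through
+        // a BufferedDataSource, which releases the pooled array on Dispose.
+        //
+        //   byte[] rented = ArrayPool<byte>.Shared.Rent(1024);
+        //   byte[] data = MultipartDownloadTestHelpers.GenerateTestData(512, 0);
+        //   Buffer.BlockCopy(data, 0, rented, 0, 512);
+        //   var part = new StreamPartBuffer(1, rented, 512);
+        //   using (var source = new BufferedDataSource(part))
+        //   {
+        //       // ReadAsync copies out of the buffer until IsComplete is true
+        //   }
+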
+        [TestMethod]
+        public async Task ReadAsync_ReadsDataFromPartBuffer()
+        {
+            // Arrange
+            byte[] testData = MultipartDownloadTestHelpers.GenerateTestData(512, 0);
+            byte[] testBuffer = ArrayPool<byte>.Shared.Rent(1024);
+            Buffer.BlockCopy(testData, 0, testBuffer, 0, 512);
+
+            var partBuffer = new StreamPartBuffer(1, testBuffer, 512);
+            var dataSource = new BufferedDataSource(partBuffer);
+
+            byte[] readBuffer = new byte[512];
+
+            try
+            {
+                // Act
+                int bytesRead = await dataSource.ReadAsync(readBuffer, 0, 512, CancellationToken.None);
+
+                // Assert
+                Assert.AreEqual(512, bytesRead);
+                Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch(testData, readBuffer, 0, 512));
+                Assert.IsTrue(dataSource.IsComplete);
+            }
+            finally
+            {
+                dataSource.Dispose();
+            }
+        }
+
+        [TestMethod]
+        public async Task ReadAsync_WithPartialRead_ReturnsRequestedBytes()
+        {
+            // Arrange
+            byte[] testData = MultipartDownloadTestHelpers.GenerateTestData(512, 0);
+            byte[] testBuffer = ArrayPool<byte>.Shared.Rent(1024);
+            Buffer.BlockCopy(testData, 0, testBuffer, 0, 512);
+
+            var partBuffer = new StreamPartBuffer(1, testBuffer, 512);
+            var dataSource = new BufferedDataSource(partBuffer);
+
+            byte[] readBuffer = new byte[256];
+
+            try
+            {
+                // Act
+                int bytesRead = await dataSource.ReadAsync(readBuffer, 0, 256, CancellationToken.None);
+
+                // Assert
+                Assert.AreEqual(256, bytesRead);
+                Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch(testData, readBuffer, 0, 256));
+                Assert.IsFalse(dataSource.IsComplete);
+            }
+            finally
+            {
+                dataSource.Dispose();
+            }
+        }
+
+        [TestMethod]
+        public async Task ReadAsync_WithFullRead_ReadsAllRemainingBytes()
+        {
+            // Arrange
+            byte[] testData = MultipartDownloadTestHelpers.GenerateTestData(512, 0);
+            byte[] testBuffer = ArrayPool<byte>.Shared.Rent(1024);
+            Buffer.BlockCopy(testData, 0, testBuffer, 0, 512);
+
+            var partBuffer = new StreamPartBuffer(1, testBuffer, 512);
+            var dataSource = new BufferedDataSource(partBuffer);
+
+            byte[] readBuffer = new byte[1024]; // Larger than available
+
+            try
+            {
+                // Act
+                int bytesRead = await dataSource.ReadAsync(readBuffer, 0, 1024, CancellationToken.None);
+
+                // Assert
+                Assert.AreEqual(512, bytesRead); // Only 512 available
+                Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch(testData, readBuffer, 0, 512));
+                Assert.IsTrue(dataSource.IsComplete);
+            }
+            finally
+            {
+                dataSource.Dispose();
+            }
+        }
+
+        [TestMethod]
+        public async Task ReadAsync_WhenComplete_ReturnsZero()
+        {
+            // Arrange
+            byte[] testBuffer = ArrayPool<byte>.Shared.Rent(1024);
+            var partBuffer = new StreamPartBuffer(1, testBuffer, 512);
+            partBuffer.CurrentPosition = 512; // Move to end
+            var dataSource = new BufferedDataSource(partBuffer);
+
+            byte[] readBuffer = new byte[256];
+
+            try
+            {
+                // Act
+                int bytesRead = await dataSource.ReadAsync(readBuffer, 0, 256, CancellationToken.None);
+
+                // Assert
+                Assert.AreEqual(0, bytesRead);
+                Assert.IsTrue(dataSource.IsComplete);
+            }
+            finally
+            {
+                dataSource.Dispose();
+            }
+        }
+
+        #endregion
+
+        #region ReadAsync Tests - Parameter Validation
+
+        [TestMethod]
+        [ExpectedException(typeof(ArgumentNullException))]
+        public async Task ReadAsync_WithNullBuffer_ThrowsArgumentNullException()
+        {
+            // Arrange
+            byte[] testBuffer = ArrayPool<byte>.Shared.Rent(1024);
+            var partBuffer = new StreamPartBuffer(1, testBuffer, 512);
+            var dataSource = new BufferedDataSource(partBuffer);
+
+            try
+            {
+                // Act
+                await dataSource.ReadAsync(null, 0, 100, CancellationToken.None);
+
+                // Assert - ExpectedException
+            }
+            finally
+            {
+                dataSource.Dispose();
+            }
+        }
+
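+        // The remaining validation tests assert the standard Stream argument contract:
+        // a negative offset or count throws ArgumentOutOfRangeException, and
+        // offset + count greater than buffer.Length throws ArgumentException.
+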
+        [TestMethod]
+        [ExpectedException(typeof(ArgumentOutOfRangeException))]
+        public async Task ReadAsync_WithNegativeOffset_ThrowsArgumentOutOfRangeException()
+        {
+            // Arrange
+            byte[] testBuffer = ArrayPool<byte>.Shared.Rent(1024);
+            var partBuffer = new StreamPartBuffer(1, testBuffer, 512);
+            var dataSource = new BufferedDataSource(partBuffer);
+            byte[] readBuffer = new byte[256];
+
+            try
+            {
+                // Act
+                await dataSource.ReadAsync(readBuffer, -1, 100, CancellationToken.None);
+
+                // Assert - ExpectedException
+            }
+            finally
+            {
+                dataSource.Dispose();
+            }
+        }
+
+        [TestMethod]
+        [ExpectedException(typeof(ArgumentOutOfRangeException))]
+        public async Task ReadAsync_WithNegativeCount_ThrowsArgumentOutOfRangeException()
+        {
+            // Arrange
+            byte[] testBuffer = ArrayPool<byte>.Shared.Rent(1024);
+            var partBuffer = new StreamPartBuffer(1, testBuffer, 512);
+            var dataSource = new BufferedDataSource(partBuffer);
+            byte[] readBuffer = new byte[256];
+
+            try
+            {
+                // Act
+                await dataSource.ReadAsync(readBuffer, 0, -1, CancellationToken.None);
+
+                // Assert - ExpectedException
+            }
+            finally
+            {
+                dataSource.Dispose();
+            }
+        }
+
+        [TestMethod]
+        [ExpectedException(typeof(ArgumentException))]
+        public async Task ReadAsync_WithOffsetCountExceedingBounds_ThrowsArgumentException()
+        {
+            // Arrange
+            byte[] testBuffer = ArrayPool<byte>.Shared.Rent(1024);
+            var partBuffer = new StreamPartBuffer(1, testBuffer, 512);
+            var dataSource = new BufferedDataSource(partBuffer);
+            byte[] readBuffer = new byte[256];
+
+            try
+            {
+                // Act - offset + count > buffer.Length
+                await dataSource.ReadAsync(readBuffer, 100, 200, CancellationToken.None);
+
+                // Assert - ExpectedException
+            }
+            finally
+            {
+                dataSource.Dispose();
+            }
+        }
+
+        #endregion
+
+        #region ReadAsync Tests - Multiple Reads
+
+        [TestMethod]
+        public async Task ReadAsync_MultipleReads_ConsumesAllData()
+        {
+            // Arrange
+            byte[] testData = MultipartDownloadTestHelpers.GenerateTestData(512, 0);
+            byte[] testBuffer = ArrayPool<byte>.Shared.Rent(1024);
+            Buffer.BlockCopy(testData, 0, testBuffer, 0, 512);
+
+            var partBuffer = new StreamPartBuffer(1, testBuffer, 512);
+            var dataSource = new BufferedDataSource(partBuffer);
+
+            byte[] readBuffer1 = new byte[256];
+            byte[] readBuffer2 = new byte[256];
+
+            try
+            {
+                // Act - Read in two chunks
+                int bytesRead1 = await dataSource.ReadAsync(readBuffer1, 0, 256, CancellationToken.None);
+                int bytesRead2 = await dataSource.ReadAsync(readBuffer2, 0, 256, CancellationToken.None);
+
+                // Assert
+                Assert.AreEqual(256, bytesRead1);
+                Assert.AreEqual(256, bytesRead2);
+                Assert.IsTrue(dataSource.IsComplete);
+
+                // Verify data correctness
+                Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch(testData, readBuffer1, 0, 256));
+
+                // Extract second segment manually for .NET Framework compatibility
+                byte[] secondSegment = new byte[256];
+                Buffer.BlockCopy(testData, 256, secondSegment, 0, 256);
+                Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch(
+                    secondSegment,
+                    readBuffer2,
+                    0,
+                    256));
+            }
+            finally
+            {
+                dataSource.Dispose();
+            }
+        }
+
+        [TestMethod]
+        public async Task ReadAsync_ReadingToEnd_ReturnsZeroOnSubsequentReads()
+        {
+            // Arrange
+            byte[] testBuffer = ArrayPool<byte>.Shared.Rent(1024);
+            var partBuffer = new StreamPartBuffer(1, testBuffer, 512);
+            var dataSource = new BufferedDataSource(partBuffer);
+
+            byte[] readBuffer = new byte[512];
+
+            try
+            {
+                // Act - Read all data
+                int bytesRead1 = await dataSource.ReadAsync(readBuffer, 0, 512, CancellationToken.None);
+
+                // Try to read again
+                int bytesRead2 = await dataSource.ReadAsync(readBuffer, 0, 512, CancellationToken.None);
+
+                // Assert
+                Assert.AreEqual(512, bytesRead1);
+                Assert.AreEqual(0, bytesRead2);
+                Assert.IsTrue(dataSource.IsComplete);
+            }
+            finally
+            {
+                dataSource.Dispose();
+            }
+        }
+
+        #endregion
+
+        #region Error Handling Tests
+
+        [TestMethod]
+        public async Task ReadAsync_WhenExceptionDuringRead_MarksBufferConsumed()
+        {
+            // Arrange
+            byte[] testBuffer = ArrayPool<byte>.Shared.Rent(1024);
+            var partBuffer = new StreamPartBuffer(1, testBuffer, 512);
+            var dataSource = new BufferedDataSource(partBuffer);
+
+            // Create a buffer that will cause BlockCopy to fail (null destination)
+            byte[] readBuffer = null;
+
+            try
+            {
+                // Act & Assert - Should throw ArgumentNullException
+                await Assert.ThrowsExceptionAsync<ArgumentNullException>(async () =>
+                {
+                    await dataSource.ReadAsync(readBuffer, 0, 512, CancellationToken.None);
+                });
+
+                // Verify buffer was marked as consumed (position set to Length)
+                Assert.IsTrue(dataSource.IsComplete);
+            }
+            finally
+            {
+                dataSource.Dispose();
+            }
+        }
+
+        #endregion
+
+        #region Disposal Tests
+
+        [TestMethod]
+        public void Dispose_DisposesUnderlyingPartBuffer()
+        {
+            // Arrange
+            byte[] testBuffer = ArrayPool<byte>.Shared.Rent(1024);
+            var partBuffer = new StreamPartBuffer(1, testBuffer, 512);
+            var dataSource = new BufferedDataSource(partBuffer);
+
+            // Act
+            dataSource.Dispose();
+
+            // Assert - The underlying part buffer should be disposed (ArrayPoolBuffer nulled)
+            Assert.IsNull(partBuffer.ArrayPoolBuffer);
+        }
+
+        [TestMethod]
+        public void Dispose_MultipleCalls_IsIdempotent()
+        {
+            // Arrange
+            byte[] testBuffer = ArrayPool<byte>.Shared.Rent(1024);
+            var partBuffer = new StreamPartBuffer(1, testBuffer, 512);
+            var dataSource = new BufferedDataSource(partBuffer);
+
+            // Act - Dispose multiple times
+            dataSource.Dispose();
+            dataSource.Dispose();
+            dataSource.Dispose();
+
+            // Assert - Should not throw
+            Assert.IsNull(partBuffer.ArrayPoolBuffer);
+        }
+
+        [TestMethod]
+        [ExpectedException(typeof(ObjectDisposedException))]
+        public async Task ReadAsync_AfterDispose_ThrowsObjectDisposedException()
+        {
+            // Arrange
+            byte[] testBuffer = ArrayPool<byte>.Shared.Rent(1024);
+            var partBuffer = new StreamPartBuffer(1, testBuffer, 512);
+            var dataSource = new BufferedDataSource(partBuffer);
+            byte[] readBuffer = new byte[256];
+
+            // Dispose the data source
+            dataSource.Dispose();
+
+            // Act - Try to read after disposal
+            await dataSource.ReadAsync(readBuffer, 0, 256, CancellationToken.None);
+
+            // Assert - ExpectedException
+        }
+
+        #endregion
+    }
+}
diff --git a/sdk/test/Services/S3/UnitTests/Custom/BufferedDownloadConfigurationTests.cs b/sdk/test/Services/S3/UnitTests/Custom/BufferedDownloadConfigurationTests.cs
new file mode 100644
index 000000000000..f173be94eb20
--- /dev/null
+++ b/sdk/test/Services/S3/UnitTests/Custom/BufferedDownloadConfigurationTests.cs
@@ -0,0 +1,151 @@
+using Amazon.S3.Transfer.Internal;
+using Microsoft.VisualStudio.TestTools.UnitTesting;
+using System;
+
+namespace AWSSDK.UnitTests
+{
+    ///
+    /// Unit tests for BufferedDownloadConfiguration class.
+    /// Tests configuration validation and parameter handling.
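+    /// A typical valid configuration, mirroring the values exercised in the tests
+    /// below (argument order: concurrent requests, max in-memory parts, buffer size,
+    /// target part size in bytes), is for example:
+    ///   new BufferedDownloadConfiguration(10, 5, 8192, 8 * 1024 * 1024)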
+ /// + [TestClass] + public class BufferedDownloadConfigurationTests + { + #region Constructor Tests + + [TestMethod] + public void Constructor_WithValidParameters_CreatesConfiguration() + { + // Arrange + int concurrentRequests = 10; + int maxInMemoryParts = 5; + int bufferSize = 8192; + long targetPartSize = 8 * 1024 * 1024; // 8MB + + // Act + var config = new BufferedDownloadConfiguration(concurrentRequests, maxInMemoryParts, bufferSize, targetPartSize); + + // Assert + Assert.AreEqual(concurrentRequests, config.ConcurrentServiceRequests); + Assert.AreEqual(maxInMemoryParts, config.MaxInMemoryParts); + Assert.AreEqual(bufferSize, config.BufferSize); + Assert.AreEqual(targetPartSize, config.TargetPartSizeBytes); + } + + [TestMethod] + public void Constructor_WithCustomPartSize_UsesProvidedValue() + { + // Arrange + long expectedPartSize = 10 * 1024 * 1024; // 10MB + + // Act + var config = new BufferedDownloadConfiguration(10, 5, 8192, expectedPartSize); + + // Assert + Assert.AreEqual(expectedPartSize, config.TargetPartSizeBytes); + } + + [TestMethod] + public void Constructor_WithVeryLargeValues_HandlesCorrectly() + { + // Arrange + int largeValue = int.MaxValue; + long largePartSize = long.MaxValue / 2; + + // Act + var config = new BufferedDownloadConfiguration(largeValue, largeValue, largeValue, largePartSize); + + // Assert + Assert.AreEqual(largeValue, config.ConcurrentServiceRequests); + Assert.AreEqual(largeValue, config.MaxInMemoryParts); + Assert.AreEqual(largeValue, config.BufferSize); + Assert.AreEqual(largePartSize, config.TargetPartSizeBytes); + } + + [TestMethod] + public void Constructor_With8MBPartSize_StoresCorrectValue() + { + // Arrange + long partSize = 8 * 1024 * 1024; // 8MB + + // Act + var config = new BufferedDownloadConfiguration(1, 1, 1, partSize); + + // Assert + Assert.AreEqual(1, config.ConcurrentServiceRequests); + Assert.AreEqual(1, config.MaxInMemoryParts); + Assert.AreEqual(1, config.BufferSize); + Assert.AreEqual(partSize, config.TargetPartSizeBytes); + } + + #endregion + + #region Validation Tests + + [TestMethod] + [ExpectedException(typeof(ArgumentOutOfRangeException))] + public void Constructor_WithNegativeConcurrentRequests_ThrowsException() + { + // Act & Assert - ExpectedException + var config = new BufferedDownloadConfiguration(-1, 5, 8192, 8 * 1024 * 1024); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentOutOfRangeException))] + public void Constructor_WithZeroConcurrentRequests_ThrowsException() + { + // Act & Assert - ExpectedException + var config = new BufferedDownloadConfiguration(0, 5, 8192, 8 * 1024 * 1024); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentOutOfRangeException))] + public void Constructor_WithNegativeMaxInMemoryParts_ThrowsException() + { + // Act & Assert - ExpectedException + var config = new BufferedDownloadConfiguration(10, -1, 8192, 8 * 1024 * 1024); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentOutOfRangeException))] + public void Constructor_WithZeroMaxInMemoryParts_ThrowsException() + { + // Act & Assert - ExpectedException + var config = new BufferedDownloadConfiguration(10, 0, 8192, 8 * 1024 * 1024); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentOutOfRangeException))] + public void Constructor_WithNegativeTargetPartSize_ThrowsException() + { + // Act & Assert - ExpectedException + var config = new BufferedDownloadConfiguration(10, 5, 8192, -1L); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentOutOfRangeException))] + public void 
Constructor_WithZeroTargetPartSize_ThrowsException() + { + // Act & Assert - ExpectedException + var config = new BufferedDownloadConfiguration(10, 5, 8192, 0L); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentOutOfRangeException))] + public void Constructor_WithNegativeBufferSize_ThrowsException() + { + // Act & Assert - ExpectedException + var config = new BufferedDownloadConfiguration(10, 5, -1, 8 * 1024 * 1024); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentOutOfRangeException))] + public void Constructor_WithZeroBufferSize_ThrowsException() + { + // Act & Assert - ExpectedException + var config = new BufferedDownloadConfiguration(10, 5, 0, 8 * 1024 * 1024); + } + + #endregion + } +} diff --git a/sdk/test/Services/S3/UnitTests/Custom/BufferedMultipartStreamTests.cs b/sdk/test/Services/S3/UnitTests/Custom/BufferedMultipartStreamTests.cs new file mode 100644 index 000000000000..d85104e0ad99 --- /dev/null +++ b/sdk/test/Services/S3/UnitTests/Custom/BufferedMultipartStreamTests.cs @@ -0,0 +1,1283 @@ +using Amazon.S3; +using Amazon.S3.Model; +using Amazon.S3.Transfer; +using Amazon.S3.Transfer.Internal; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Moq; +using System; +using System.IO; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; + +namespace AWSSDK.UnitTests +{ + [TestClass] + public class BufferedMultipartStreamTests + { + #region Test Constants + + private const int EMPTY_OBJECT_SIZE = 0; + private const int SMALL_OBJECT_SIZE = 512; + private const int MEDIUM_OBJECT_SIZE = 1024; + private const int LARGE_OBJECT_SIZE = 4096; + private const int VERY_LARGE_OBJECT_SIZE = 50 * 1024 * 1024; + private const int DEFAULT_PART_SIZE = 5 * 1024 * 1024; + private const int SMALL_CHUNK_SIZE = 128; + private const int MEDIUM_CHUNK_SIZE = 256; + private const int LARGE_CHUNK_SIZE = 512; + + #endregion + + #region Test Setup Fields + + private Mock _mockCoordinator; + private Mock _mockBufferManager; + private BufferedDownloadConfiguration _config; + + #endregion + + #region Test Initialization + + [TestInitialize] + public void Setup() + { + _mockCoordinator = new Mock(); + _mockBufferManager = new Mock(); + _config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + } + + #endregion + + #region Helper Methods + + private BufferedMultipartStream CreateStream() + { + return new BufferedMultipartStream(_mockCoordinator.Object, _mockBufferManager.Object, _config); + } + + private async Task CreateInitializedStreamAsync( + long objectSize = MEDIUM_OBJECT_SIZE, + int totalParts = 1) + { + var mockResponse = totalParts == 1 + ? 
MultipartDownloadTestHelpers.CreateSinglePartResponse(objectSize) + : new GetObjectResponse(); + + var discoveryResult = new DownloadDiscoveryResult + { + TotalParts = totalParts, + ObjectSize = objectSize, + InitialResponse = mockResponse + }; + + _mockCoordinator.Setup(x => x.DiscoverDownloadStrategyAsync(It.IsAny())) + .ReturnsAsync(discoveryResult); + _mockCoordinator.Setup(x => x.StartDownloadsAsync(It.IsAny(), It.IsAny())) + .Returns(Task.CompletedTask); + + var stream = CreateStream(); + await stream.InitializeAsync(CancellationToken.None); + return stream; + } + + #endregion + + #region Constructor Tests + + [TestMethod] + public void Constructor_WithValidDependencies_CreatesStream() + { + // Act + var stream = CreateStream(); + + // Assert + Assert.IsNotNull(stream); + Assert.IsTrue(stream.CanRead); + Assert.IsFalse(stream.CanSeek); + Assert.IsFalse(stream.CanWrite); + } + + [DataTestMethod] + [DataRow(null, "bufferManager", "config", DisplayName = "Null Coordinator")] + [DataRow("coordinator", null, "config", DisplayName = "Null Buffer Manager")] + [DataRow("coordinator", "bufferManager", null, DisplayName = "Null Config")] + [ExpectedException(typeof(ArgumentNullException))] + public void Constructor_WithNullParameter_ThrowsArgumentNullException( + string coordinatorKey, string bufferManagerKey, string configKey) + { + // Arrange + var coordinator = coordinatorKey != null ? _mockCoordinator.Object : null; + var bufferManager = bufferManagerKey != null ? _mockBufferManager.Object : null; + var config = configKey != null ? _config : null; + + // Act + var stream = new BufferedMultipartStream(coordinator, bufferManager, config); + } + + #endregion + + #region Factory Method Tests + + [TestMethod] + public void Create_WithValidParameters_CreatesStream() + { + // Arrange + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var transferConfig = new TransferUtilityConfig(); + + // Act + var stream = BufferedMultipartStream.Create(mockClient.Object, request, transferConfig); + + // Assert + Assert.IsNotNull(stream); + Assert.IsNull(stream.DiscoveryResult); // Not initialized yet + } + + [DataTestMethod] + [DataRow(null, "request", "config", DisplayName = "Null S3 Client")] + [DataRow("client", null, "config", DisplayName = "Null Request")] + [DataRow("client", "request", null, DisplayName = "Null Transfer Config")] + [ExpectedException(typeof(ArgumentNullException))] + public void Create_WithNullParameter_ThrowsArgumentNullException( + string clientKey, string requestKey, string configKey) + { + // Arrange + var client = clientKey != null ? MultipartDownloadTestHelpers.CreateMockS3Client().Object : null; + var request = requestKey != null ? MultipartDownloadTestHelpers.CreateOpenStreamRequest() : null; + var config = configKey != null ? 
new TransferUtilityConfig() : null; + + // Act + var stream = BufferedMultipartStream.Create(client, request, config); + } + + #endregion + + #region InitializeAsync Tests - Single Part + + [TestMethod] + public async Task InitializeAsync_SinglePart_UsesSinglePartHandler() + { + // Arrange + var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse(1024); + var discoveryResult = new DownloadDiscoveryResult + { + TotalParts = 1, + ObjectSize = 1024, + InitialResponse = mockResponse + }; + + var mockCoordinator = new Mock(); + mockCoordinator.Setup(x => x.DiscoverDownloadStrategyAsync(It.IsAny())) + .ReturnsAsync(discoveryResult); + + var mockBufferManager = new Mock(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var stream = new BufferedMultipartStream(mockCoordinator.Object, mockBufferManager.Object, config); + + // Act + await stream.InitializeAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(stream.DiscoveryResult); + Assert.AreEqual(1, stream.DiscoveryResult.TotalParts); + } + + [TestMethod] + public async Task InitializeAsync_SinglePart_CallsStartDownloads() + { + // Arrange + var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse(1024); + var discoveryResult = new DownloadDiscoveryResult + { + TotalParts = 1, + ObjectSize = 1024, + InitialResponse = mockResponse + }; + + var mockCoordinator = new Mock(); + mockCoordinator.Setup(x => x.DiscoverDownloadStrategyAsync(It.IsAny())) + .ReturnsAsync(discoveryResult); + mockCoordinator.Setup(x => x.StartDownloadsAsync(It.IsAny(), It.IsAny())) + .Returns(Task.CompletedTask); + + var mockBufferManager = new Mock(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var stream = new BufferedMultipartStream(mockCoordinator.Object, mockBufferManager.Object, config); + + // Act + await stream.InitializeAsync(CancellationToken.None); + + // Assert + mockCoordinator.Verify( + x => x.StartDownloadsAsync(discoveryResult, It.IsAny()), + Times.Once); + } + + #endregion + + #region InitializeAsync Tests - Multipart + + [TestMethod] + public async Task InitializeAsync_Multipart_UsesMultipartHandler() + { + // Arrange + var discoveryResult = new DownloadDiscoveryResult + { + TotalParts = 5, + ObjectSize = 50 * 1024 * 1024, + InitialResponse = new GetObjectResponse() + }; + + var mockCoordinator = new Mock(); + mockCoordinator.Setup(x => x.DiscoverDownloadStrategyAsync(It.IsAny())) + .ReturnsAsync(discoveryResult); + mockCoordinator.Setup(x => x.StartDownloadsAsync(It.IsAny(), It.IsAny())) + .Returns(Task.CompletedTask); + + var mockBufferManager = new Mock(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var stream = new BufferedMultipartStream(mockCoordinator.Object, mockBufferManager.Object, config); + + // Act + await stream.InitializeAsync(CancellationToken.None); + + // Assert + Assert.AreEqual(5, stream.DiscoveryResult.TotalParts); + } + + [TestMethod] + public async Task InitializeAsync_Multipart_StartsDownloads() + { + // Arrange + var discoveryResult = new DownloadDiscoveryResult + { + TotalParts = 5, + ObjectSize = 50 * 1024 * 1024, + InitialResponse = new GetObjectResponse() + }; + + var mockCoordinator = new Mock(); + mockCoordinator.Setup(x => x.DiscoverDownloadStrategyAsync(It.IsAny())) + .ReturnsAsync(discoveryResult); + mockCoordinator.Setup(x => x.StartDownloadsAsync(It.IsAny(), It.IsAny())) + .Returns(Task.CompletedTask); + + var mockBufferManager = new Mock(); + var config = 
MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var stream = new BufferedMultipartStream(mockCoordinator.Object, mockBufferManager.Object, config); + + // Act + await stream.InitializeAsync(CancellationToken.None); + + // Assert + mockCoordinator.Verify( + x => x.StartDownloadsAsync(discoveryResult, It.IsAny()), + Times.Once); + } + + #endregion + + #region InitializeAsync Tests - State Management + + [TestMethod] + public async Task InitializeAsync_SetsDiscoveryResult() + { + // Arrange + var discoveryResult = new DownloadDiscoveryResult + { + TotalParts = 1, + ObjectSize = 1024, + InitialResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse(1024) + }; + + var mockCoordinator = new Mock(); + mockCoordinator.Setup(x => x.DiscoverDownloadStrategyAsync(It.IsAny())) + .ReturnsAsync(discoveryResult); + + var mockBufferManager = new Mock(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var stream = new BufferedMultipartStream(mockCoordinator.Object, mockBufferManager.Object, config); + + // Act + await stream.InitializeAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(stream.DiscoveryResult); + Assert.AreEqual(discoveryResult, stream.DiscoveryResult); + } + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public async Task InitializeAsync_CalledTwice_ThrowsInvalidOperationException() + { + // Arrange + var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse(1024); + var discoveryResult = new DownloadDiscoveryResult + { + TotalParts = 1, + ObjectSize = 1024, + InitialResponse = mockResponse + }; + + var mockCoordinator = new Mock(); + mockCoordinator.Setup(x => x.DiscoverDownloadStrategyAsync(It.IsAny())) + .ReturnsAsync(discoveryResult); + + var mockBufferManager = new Mock(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var stream = new BufferedMultipartStream(mockCoordinator.Object, mockBufferManager.Object, config); + + // Act + await stream.InitializeAsync(CancellationToken.None); + await stream.InitializeAsync(CancellationToken.None); // Second call should throw + } + + #endregion + + #region ReadAsync Tests + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public async Task ReadAsync_BeforeInitialize_ThrowsInvalidOperationException() + { + // Arrange + var mockCoordinator = new Mock(); + var mockBufferManager = new Mock(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var stream = new BufferedMultipartStream(mockCoordinator.Object, mockBufferManager.Object, config); + + var buffer = new byte[1024]; + + // Act + await stream.ReadAsync(buffer, 0, buffer.Length); + } + + [TestMethod] + [ExpectedException(typeof(ObjectDisposedException))] + public async Task ReadAsync_AfterDispose_ThrowsObjectDisposedException() + { + // Arrange + var mockCoordinator = new Mock(); + var mockBufferManager = new Mock(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var stream = new BufferedMultipartStream(mockCoordinator.Object, mockBufferManager.Object, config); + + stream.Dispose(); + var buffer = new byte[1024]; + + // Act + await stream.ReadAsync(buffer, 0, buffer.Length); + } + + #endregion + + #region ReadAsync Tests - Parameter Validation + + [TestMethod] + public async Task ReadAsync_WithNullBuffer_ThrowsArgumentNullException() + { + // Arrange + var stream = await CreateInitializedStreamAsync(); + + // Act & Assert + await 
Assert.ThrowsExceptionAsync<ArgumentNullException>(
+                async () => await stream.ReadAsync(null, 0, MEDIUM_OBJECT_SIZE));
+
+            // Cleanup
+            stream.Dispose();
+        }
+
+        [TestMethod]
+        public async Task ReadAsync_WithNegativeOffset_ThrowsArgumentOutOfRangeException()
+        {
+            // Arrange
+            var stream = await CreateInitializedStreamAsync();
+            var buffer = new byte[MEDIUM_OBJECT_SIZE];
+
+            // Act & Assert
+            await Assert.ThrowsExceptionAsync<ArgumentOutOfRangeException>(
+                async () => await stream.ReadAsync(buffer, -1, MEDIUM_OBJECT_SIZE));
+
+            // Cleanup
+            stream.Dispose();
+        }
+
+        [TestMethod]
+        public async Task ReadAsync_WithNegativeCount_ThrowsArgumentOutOfRangeException()
+        {
+            // Arrange
+            var stream = await CreateInitializedStreamAsync();
+            var buffer = new byte[MEDIUM_OBJECT_SIZE];
+
+            // Act & Assert
+            await Assert.ThrowsExceptionAsync<ArgumentOutOfRangeException>(
+                async () => await stream.ReadAsync(buffer, 0, -1));
+
+            // Cleanup
+            stream.Dispose();
+        }
+
+        [TestMethod]
+        public async Task ReadAsync_WithOffsetCountExceedingBounds_ThrowsArgumentException()
+        {
+            // Arrange
+            var stream = await CreateInitializedStreamAsync();
+            var buffer = new byte[MEDIUM_OBJECT_SIZE];
+
+            // Act & Assert
+            await Assert.ThrowsExceptionAsync<ArgumentException>(
+                async () => await stream.ReadAsync(buffer, 100, 1000)); // 100 + 1000 > 1024
+
+            // Cleanup
+            stream.Dispose();
+        }
+
+        #endregion
+
+        #region Stream Property Tests
+
+        [TestMethod]
+        public void StreamCapabilities_HaveCorrectValues()
+        {
+            // Arrange
+            var stream = CreateStream();
+
+            // Act & Assert
+            Assert.IsTrue(stream.CanRead, "Stream should be readable");
+            Assert.IsFalse(stream.CanSeek, "Stream should not be seekable");
+            Assert.IsFalse(stream.CanWrite, "Stream should not be writable");
+        }
+
+        [TestMethod]
+        [ExpectedException(typeof(InvalidOperationException))]
+        public void Length_BeforeInitialization_ThrowsInvalidOperationException()
+        {
+            // Arrange
+            var stream = CreateStream();
+
+            // Act
+            _ = stream.Length;
+        }
+
+        [TestMethod]
+        public async Task Length_AfterInitialization_ReturnsObjectSize()
+        {
+            // Arrange
+            var objectSize = MEDIUM_OBJECT_SIZE;
+            var stream = await CreateInitializedStreamAsync(objectSize: objectSize);
+
+            // Act
+            var length = stream.Length;
+
+            // Assert
+            Assert.AreEqual(objectSize, length, "Length should return ObjectSize from discovery result");
+
+            // Cleanup
+            stream.Dispose();
+        }
+
+        [TestMethod]
+        public async Task Length_ForLargeObject_ReturnsCorrectSize()
+        {
+            // Arrange
+            var objectSize = VERY_LARGE_OBJECT_SIZE;
+            var stream = await CreateInitializedStreamAsync(objectSize: objectSize, totalParts: 10);
+
+            // Act
+            var length = stream.Length;
+
+            // Assert
+            Assert.AreEqual(objectSize, length, "Length should return correct size for large objects");
+
+            // Cleanup
+            stream.Dispose();
+        }
+
+        [TestMethod]
+        [ExpectedException(typeof(InvalidOperationException))]
+        public void Position_BeforeInitialization_ThrowsInvalidOperationException()
+        {
+            // Arrange
+            var stream = CreateStream();
+
+            // Act
+            _ = stream.Position;
+        }
+
+        [TestMethod]
+        public async Task Position_AfterInitialization_ReturnsZero()
+        {
+            // Arrange
+            var stream = await CreateInitializedStreamAsync();
+
+            // Act
+            var position = stream.Position;
+
+            // Assert
+            Assert.AreEqual(0, position, "Position should be 0 before any reads");
+
+            // Cleanup
+            stream.Dispose();
+        }
+
+        [TestMethod]
+        public async Task Position_AfterSingleRead_ReturnsCorrectValue()
+        {
+            // Arrange
+            var objectSize = MEDIUM_OBJECT_SIZE;
+            var testData = MultipartDownloadTestHelpers.GenerateTestData(objectSize, 0);
+            var mockResponse =
MultipartDownloadTestHelpers.CreateMockGetObjectResponse( + objectSize, null, null, "test-etag", testData); + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var transferConfig = new TransferUtilityConfig(); + var stream = BufferedMultipartStream.Create(mockClient.Object, request, transferConfig); + await stream.InitializeAsync(CancellationToken.None); + + // Act + var buffer = new byte[SMALL_CHUNK_SIZE]; + var bytesRead = await stream.ReadAsync(buffer, 0, buffer.Length); + var position = stream.Position; + + // Assert + Assert.AreEqual(bytesRead, position, "Position should equal bytes read"); + + // Cleanup + stream.Dispose(); + } + + [TestMethod] + public async Task Position_AfterMultipleReads_AccumulatesCorrectly() + { + // Arrange + var objectSize = MEDIUM_OBJECT_SIZE; + var testData = MultipartDownloadTestHelpers.GenerateTestData(objectSize, 0); + var mockResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse( + objectSize, null, null, "test-etag", testData); + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var transferConfig = new TransferUtilityConfig(); + var stream = BufferedMultipartStream.Create(mockClient.Object, request, transferConfig); + await stream.InitializeAsync(CancellationToken.None); + + // Act - Perform multiple reads + var buffer = new byte[SMALL_CHUNK_SIZE]; + var totalBytesRead = 0; + + var read1 = await stream.ReadAsync(buffer, 0, buffer.Length); + totalBytesRead += read1; + Assert.AreEqual(totalBytesRead, stream.Position, "Position should match after first read"); + + var read2 = await stream.ReadAsync(buffer, 0, buffer.Length); + totalBytesRead += read2; + Assert.AreEqual(totalBytesRead, stream.Position, "Position should accumulate after second read"); + + var read3 = await stream.ReadAsync(buffer, 0, buffer.Length); + totalBytesRead += read3; + Assert.AreEqual(totalBytesRead, stream.Position, "Position should accumulate after third read"); + + // Assert + Assert.IsTrue(totalBytesRead > 0, "Should have read some data"); + Assert.AreEqual(totalBytesRead, stream.Position, "Position should equal total bytes read"); + + // Cleanup + stream.Dispose(); + } + + [TestMethod] + public async Task Position_AtEndOfStream_EqualsLength() + { + // Arrange + var objectSize = SMALL_OBJECT_SIZE; + var testData = MultipartDownloadTestHelpers.GenerateTestData(objectSize, 0); + var mockResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse( + objectSize, null, null, "test-etag", testData); + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var transferConfig = new TransferUtilityConfig(); + var stream = BufferedMultipartStream.Create(mockClient.Object, request, transferConfig); + await stream.InitializeAsync(CancellationToken.None); + + // Act - Read entire stream + var buffer = new byte[objectSize]; + await stream.ReadAsync(buffer, 0, buffer.Length); + + // Assert + Assert.AreEqual(stream.Length, stream.Position, + "Position should equal Length after reading entire stream"); + + // Cleanup + stream.Dispose(); + } + + [TestMethod] + public async Task Position_WithZeroByteRead_DoesNotChange() + { + // Arrange + var objectSize = 
SMALL_OBJECT_SIZE; + var testData = MultipartDownloadTestHelpers.GenerateTestData(objectSize, 0); + var mockResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse( + objectSize, null, null, "test-etag", testData); + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var transferConfig = new TransferUtilityConfig(); + var stream = BufferedMultipartStream.Create(mockClient.Object, request, transferConfig); + await stream.InitializeAsync(CancellationToken.None); + + // Act - Read entire stream, then try to read again + var buffer = new byte[objectSize]; + await stream.ReadAsync(buffer, 0, buffer.Length); + var positionAfterFullRead = stream.Position; + + // Try to read past end + await stream.ReadAsync(buffer, 0, buffer.Length); + var positionAfterSecondRead = stream.Position; + + // Assert + Assert.AreEqual(positionAfterFullRead, positionAfterSecondRead, + "Position should not change when read returns 0 bytes"); + + // Cleanup + stream.Dispose(); + } + + [TestMethod] + public async Task Position_SynchronousRead_UpdatesCorrectly() + { + // Arrange + var objectSize = MEDIUM_OBJECT_SIZE; + var testData = MultipartDownloadTestHelpers.GenerateTestData(objectSize, 0); + var mockResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse( + objectSize, null, null, "test-etag", testData); + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var transferConfig = new TransferUtilityConfig(); + var stream = BufferedMultipartStream.Create(mockClient.Object, request, transferConfig); + await stream.InitializeAsync(CancellationToken.None); + + // Act - Use synchronous Read method + var buffer = new byte[SMALL_CHUNK_SIZE]; + var bytesRead = stream.Read(buffer, 0, buffer.Length); + + // Assert + Assert.AreEqual(bytesRead, stream.Position, + "Position should update correctly for synchronous Read"); + + // Cleanup + stream.Dispose(); + } + + [TestMethod] + public async Task Position_LengthAndPosition_ProvideProgressTracking() + { + // Arrange + var objectSize = LARGE_OBJECT_SIZE; + var testData = MultipartDownloadTestHelpers.GenerateTestData(objectSize, 0); + var mockResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse( + objectSize, null, null, "test-etag", testData); + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var transferConfig = new TransferUtilityConfig(); + var stream = BufferedMultipartStream.Create(mockClient.Object, request, transferConfig); + await stream.InitializeAsync(CancellationToken.None); + + // Act & Assert - Verify progress calculation + var buffer = new byte[MEDIUM_CHUNK_SIZE]; + var totalBytesRead = 0; + + while (true) + { + var bytesRead = await stream.ReadAsync(buffer, 0, buffer.Length); + if (bytesRead == 0) break; + + totalBytesRead += bytesRead; + + // Verify progress can be calculated + var progressPercentage = (double)stream.Position / stream.Length * 100; + Assert.IsTrue(progressPercentage >= 0 && progressPercentage <= 100, + "Progress percentage should be between 0 and 100"); + Assert.AreEqual(totalBytesRead, stream.Position, + "Position should track total bytes read"); + } + + // Final verification + Assert.AreEqual(objectSize, 
totalBytesRead, "Should read entire object");
+            Assert.AreEqual(100.0, (double)stream.Position / stream.Length * 100,
+                "Progress should be 100% at completion");
+
+            // Cleanup
+            stream.Dispose();
+        }
+
+        [TestMethod]
+        [ExpectedException(typeof(NotSupportedException))]
+        public async Task Position_Setter_ThrowsNotSupportedException()
+        {
+            // Arrange
+            var stream = await CreateInitializedStreamAsync();
+
+            // Act
+            stream.Position = 100;
+        }
+
+        #endregion
+
+        #region Unsupported Operation Tests
+
+        [DataTestMethod]
+        [DataRow("Seek", DisplayName = "Seek Operation")]
+        [DataRow("SetLength", DisplayName = "SetLength Operation")]
+        [DataRow("Write", DisplayName = "Write Operation")]
+        public void UnsupportedOperations_ThrowNotSupportedException(string operation)
+        {
+            // Arrange
+            var stream = CreateStream();
+            var buffer = new byte[MEDIUM_OBJECT_SIZE];
+
+            // Act & Assert
+            Assert.ThrowsException<NotSupportedException>(() =>
+            {
+                switch (operation)
+                {
+                    case "Seek":
+                        stream.Seek(0, SeekOrigin.Begin);
+                        break;
+                    case "SetLength":
+                        stream.SetLength(MEDIUM_OBJECT_SIZE);
+                        break;
+                    case "Write":
+                        stream.Write(buffer, 0, buffer.Length);
+                        break;
+                }
+            });
+        }
+
+        #endregion
+
+        #region Flush Tests
+
+        [TestMethod]
+        public void Flush_DoesNotThrow()
+        {
+            // Arrange
+            var stream = CreateStream();
+
+            // Act
+            stream.Flush(); // Should not throw
+
+            // Assert - no exception
+        }
+
+        [TestMethod]
+        public async Task FlushAsync_Completes()
+        {
+            // Arrange
+            var stream = CreateStream();
+
+            // Act
+            await stream.FlushAsync(CancellationToken.None); // Should complete without error
+
+            // Assert - no exception
+        }
+
+        #endregion
+
+        #region Synchronous Read Tests
+
+        [TestMethod]
+        [ExpectedException(typeof(InvalidOperationException))]
+        public void Read_BeforeInitialize_ThrowsInvalidOperationException()
+        {
+            // Arrange
+            var stream = CreateStream();
+            var buffer = new byte[MEDIUM_OBJECT_SIZE];
+
+            // Act
+            stream.Read(buffer, 0, buffer.Length);
+        }
+
+        #endregion
+
+        #region Disposal Tests
+
+        [TestMethod]
+        public void Dispose_MultipleCalls_IsIdempotent()
+        {
+            // Arrange
+            var stream = CreateStream();
+
+            // Act
+            stream.Dispose();
+            stream.Dispose(); // Second call should not throw
+
+            // Assert - no exception
+        }
+
+        [TestMethod]
+        public void Dispose_SuppressesExceptions()
+        {
+            // Arrange
+            _mockCoordinator.Setup(x => x.Dispose()).Throws<Exception>();
+            _mockBufferManager.Setup(x => x.Dispose()).Throws<Exception>();
+
+            var stream = CreateStream();
+
+            // Act
+            stream.Dispose(); // Should not propagate exceptions
+
+            // Assert - no exception thrown
+        }
+
+        #endregion
+
+        #region Stream Reading Behavior Tests - Empty Object
+
+        [TestMethod]
+        public async Task ReadAsync_EmptyObject_ReturnsZero()
+        {
+            // Arrange - Empty object (0 bytes)
+            var testData = new byte[0];
+            var mockResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse(0, null, null, "empty-etag", testData);
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client((req, ct) => Task.FromResult(mockResponse));
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var transferConfig = new TransferUtilityConfig();
+            var stream = BufferedMultipartStream.Create(mockClient.Object, request, transferConfig);
+
+            await stream.InitializeAsync(CancellationToken.None);
+
+            // Act
+            var buffer = new byte[1024];
+            var bytesRead = await stream.ReadAsync(buffer, 0, buffer.Length);
+
+            // Assert
+            Assert.AreEqual(0, bytesRead, "Empty object should return 0 bytes");
+
+            // Cleanup
+            stream.Dispose();
+        }
+
+        #endregion
+
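+        // The behavior tests below drain the stream with a standard read loop; a
+        // minimal sketch (assuming a readable stream and any chunk size):
+        //
+        //   var allData = new System.Collections.Generic.List<byte>();
+        //   var buffer = new byte[chunkSize];
+        //   int bytesRead;
+        //   while ((bytesRead = await stream.ReadAsync(buffer, 0, buffer.Length)) > 0)
+        //       allData.AddRange(buffer.Take(bytesRead));
+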
+        #region Stream Reading Behavior Tests - Multiple Consecutive Reads
+
+        [TestMethod]
+        public async Task ReadAsync_MultipleSmallReads_ReturnsAllData()
+        {
+            // Arrange - 1KB object, read in 256-byte chunks
+            var objectSize = 1024;
+            var chunkSize = 256;
+            var testData = MultipartDownloadTestHelpers.GenerateTestData(objectSize, 0);
+            var mockResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse(
+                objectSize, null, null, "test-etag", testData);
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(
+                (req, ct) => Task.FromResult(mockResponse));
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var transferConfig = new TransferUtilityConfig();
+            var stream = BufferedMultipartStream.Create(mockClient.Object, request, transferConfig);
+
+            await stream.InitializeAsync(CancellationToken.None);
+
+            // Act - Read in multiple small chunks
+            var allData = new System.Collections.Generic.List<byte>();
+            var buffer = new byte[chunkSize];
+            int bytesRead;
+
+            while ((bytesRead = await stream.ReadAsync(buffer, 0, buffer.Length)) > 0)
+            {
+                allData.AddRange(buffer.Take(bytesRead));
+            }
+
+            // Assert
+            Assert.AreEqual(objectSize, allData.Count, "Should read entire object");
+            Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch(testData, allData.ToArray(), 0, objectSize),
+                "Data should match original");
+
+            // Cleanup
+            stream.Dispose();
+        }
+
+        [TestMethod]
+        public async Task ReadAsync_VaryingBufferSizes_ReturnsCorrectData()
+        {
+            // Arrange - 2KB object
+            var objectSize = 2048;
+            var testData = MultipartDownloadTestHelpers.GenerateTestData(objectSize, 0);
+            var mockResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse(
+                objectSize, null, null, "test-etag", testData);
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(
+                (req, ct) => Task.FromResult(mockResponse));
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var transferConfig = new TransferUtilityConfig();
+            var stream = BufferedMultipartStream.Create(mockClient.Object, request, transferConfig);
+
+            await stream.InitializeAsync(CancellationToken.None);
+
+            // Act - Read with varying buffer sizes
+            var allData = new System.Collections.Generic.List<byte>();
+
+            // First read: 512 bytes
+            var buffer1 = new byte[512];
+            var read1 = await stream.ReadAsync(buffer1, 0, buffer1.Length);
+            allData.AddRange(buffer1.Take(read1));
+
+            // Second read: 1KB
+            var buffer2 = new byte[1024];
+            var read2 = await stream.ReadAsync(buffer2, 0, buffer2.Length);
+            allData.AddRange(buffer2.Take(read2));
+
+            // Third read: 256 bytes
+            var buffer3 = new byte[256];
+            var read3 = await stream.ReadAsync(buffer3, 0, buffer3.Length);
+            allData.AddRange(buffer3.Take(read3));
+
+            // Fourth read: Remaining data
+            var buffer4 = new byte[1024];
+            var read4 = await stream.ReadAsync(buffer4, 0, buffer4.Length);
+            allData.AddRange(buffer4.Take(read4));
+
+            // Assert
+            Assert.AreEqual(objectSize, allData.Count, "Should read entire object");
+            Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch(testData, allData.ToArray(), 0, objectSize),
+                "Data should match original");
+
+            // Cleanup
+            stream.Dispose();
+        }
+
+        #endregion
+
+        #region Stream Reading Behavior Tests - End of Stream
+
+        [TestMethod]
+        public async Task ReadAsync_PastEndOfStream_ReturnsZero()
+        {
+            // Arrange - Small object
+            var objectSize = 512;
+            var testData = MultipartDownloadTestHelpers.GenerateTestData(objectSize, 0);
+            var mockResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse(
+                objectSize, null, null,
"test-etag", testData); + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var transferConfig = new TransferUtilityConfig(); + var stream = BufferedMultipartStream.Create(mockClient.Object, request, transferConfig); + + await stream.InitializeAsync(CancellationToken.None); + + // Act - Read entire stream + var buffer = new byte[objectSize]; + var firstRead = await stream.ReadAsync(buffer, 0, buffer.Length); + + // Try to read again after reaching end + var secondRead = await stream.ReadAsync(buffer, 0, buffer.Length); + + // Assert + Assert.AreEqual(objectSize, firstRead, "First read should return all data"); + Assert.AreEqual(0, secondRead, "Reading past end should return 0"); + + // Cleanup + stream.Dispose(); + } + + [TestMethod] + public async Task ReadAsync_MultipleReadsAtEnd_ConsistentlyReturnsZero() + { + // Arrange + var objectSize = 256; + var testData = MultipartDownloadTestHelpers.GenerateTestData(objectSize, 0); + var mockResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse( + objectSize, null, null, "test-etag", testData); + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var transferConfig = new TransferUtilityConfig(); + var stream = BufferedMultipartStream.Create(mockClient.Object, request, transferConfig); + + await stream.InitializeAsync(CancellationToken.None); + + // Act - Read entire stream + var buffer = new byte[objectSize]; + await stream.ReadAsync(buffer, 0, buffer.Length); + + // Try multiple reads after end + var read1 = await stream.ReadAsync(buffer, 0, buffer.Length); + var read2 = await stream.ReadAsync(buffer, 0, buffer.Length); + var read3 = await stream.ReadAsync(buffer, 0, buffer.Length); + + // Assert + Assert.AreEqual(0, read1, "First read past end should return 0"); + Assert.AreEqual(0, read2, "Second read past end should return 0"); + Assert.AreEqual(0, read3, "Third read past end should return 0"); + + // Cleanup + stream.Dispose(); + } + + #endregion + + #region Stream Reading Behavior Tests - Buffer Sizes + + [TestMethod] + public async Task ReadAsync_BufferLargerThanData_ReturnsAvailableData() + { + // Arrange - Small object, large buffer + var objectSize = 512; + var bufferSize = 2048; // Buffer larger than data + var testData = MultipartDownloadTestHelpers.GenerateTestData(objectSize, 0); + var mockResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse( + objectSize, null, null, "test-etag", testData); + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var transferConfig = new TransferUtilityConfig(); + var stream = BufferedMultipartStream.Create(mockClient.Object, request, transferConfig); + + await stream.InitializeAsync(CancellationToken.None); + + // Act + var buffer = new byte[bufferSize]; + var bytesRead = await stream.ReadAsync(buffer, 0, buffer.Length); + + // Assert + Assert.AreEqual(objectSize, bytesRead, "Should return only available data, not buffer size"); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch(testData, buffer, 0, objectSize), + "Data should match original"); + + // Cleanup + stream.Dispose(); + } + + [TestMethod] + public async Task 
ReadAsync_SmallBuffer_RequiresMultipleReads() + { + // Arrange - Larger object, very small buffer + var objectSize = 4096; + var bufferSize = 128; // Very small buffer + var testData = MultipartDownloadTestHelpers.GenerateTestData(objectSize, 0); + var mockResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse( + objectSize, null, null, "test-etag", testData); + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var transferConfig = new TransferUtilityConfig(); + var stream = BufferedMultipartStream.Create(mockClient.Object, request, transferConfig); + + await stream.InitializeAsync(CancellationToken.None); + + // Act - Read entire object with small buffer + var allData = new System.Collections.Generic.List(); + var buffer = new byte[bufferSize]; + int bytesRead; + int readCount = 0; + + while ((bytesRead = await stream.ReadAsync(buffer, 0, buffer.Length)) > 0) + { + allData.AddRange(buffer.Take(bytesRead)); + readCount++; + } + + // Assert + Assert.AreEqual(objectSize, allData.Count, "Should read entire object"); + Assert.IsTrue(readCount >= objectSize / bufferSize, + "Should require multiple reads with small buffer"); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch(testData, allData.ToArray(), 0, objectSize), + "Data should match original"); + + // Cleanup + stream.Dispose(); + } + + #endregion + + #region Stream Reading Behavior Tests - Multipart Edge Cases + + [TestMethod] + public async Task ReadAsync_ExactPartBoundary_ReadsCorrectly() + { + // Arrange - Object size exactly equals 2 parts + var partSize = 5 * 1024 * 1024; // 5MB + var totalParts = 2; + var objectSize = partSize * totalParts; // 10MB exactly + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( + totalParts, partSize, objectSize, "boundary-etag", usePartStrategy: true); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(partSize: partSize); + var transferConfig = new TransferUtilityConfig { ConcurrentServiceRequests = 1 }; + var stream = BufferedMultipartStream.Create(mockClient.Object, request, transferConfig); + + await stream.InitializeAsync(CancellationToken.None); + + // Act - Read across part boundary + var bufferSize = (int)(partSize + 1024); // Read across boundary + var buffer = new byte[bufferSize]; + var bytesRead = await stream.ReadAsync(buffer, 0, buffer.Length); + + // Assert + Assert.IsTrue(bytesRead > 0, "Should successfully read across part boundary"); + Assert.AreEqual(Math.Min(bufferSize, objectSize), bytesRead, + "Should read requested amount or remaining data"); + + // Cleanup + stream.Dispose(); + } + + [TestMethod] + public async Task ReadAsync_NonAlignedPartBoundary_ReadsCorrectly() + { + // Arrange - Object size not aligned to part boundaries + var partSize = 5 * 1024 * 1024; // 5MB + var remainder = 2 * 1024 * 1024; // 2MB + var objectSize = (2 * partSize) + remainder; // 12MB (2 full parts + 2MB) + var totalParts = 3; + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( + totalParts, partSize, objectSize, "non-aligned-etag", usePartStrategy: true); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(partSize: partSize); + var transferConfig = new TransferUtilityConfig { ConcurrentServiceRequests = 1 }; + var stream = BufferedMultipartStream.Create(mockClient.Object, request, transferConfig); + + await 
stream.InitializeAsync(CancellationToken.None); + + // Act - Read in chunks that don't align with part boundaries + var bufferSize = (int)(3 * 1024 * 1024); // 3MB chunks + var allData = new System.Collections.Generic.List(); + var buffer = new byte[bufferSize]; + int bytesRead; + + while ((bytesRead = await stream.ReadAsync(buffer, 0, buffer.Length)) > 0) + { + allData.AddRange(buffer.Take(bytesRead)); + } + + // Assert + Assert.AreEqual(objectSize, allData.Count, + "Should read entire object despite non-aligned boundaries"); + + // Cleanup + stream.Dispose(); + } + + [TestMethod] + public async Task ReadAsync_BufferLargerThanPartSize_HandlesCorrectly() + { + // Arrange - Buffer larger than part size + var partSize = 5 * 1024 * 1024; // 5MB parts + var bufferSize = (int)(8 * 1024 * 1024); // 8MB buffer (larger than part) + var objectSize = 15 * 1024 * 1024; // 15MB total + var totalParts = 3; + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( + totalParts, partSize, objectSize, "large-buffer-etag", usePartStrategy: true); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(partSize: partSize); + var transferConfig = new TransferUtilityConfig { ConcurrentServiceRequests = 1 }; + var stream = BufferedMultipartStream.Create(mockClient.Object, request, transferConfig); + + await stream.InitializeAsync(CancellationToken.None); + + // Act - Read with buffer larger than part size + var buffer = new byte[bufferSize]; + var firstRead = await stream.ReadAsync(buffer, 0, buffer.Length); + + // Assert + Assert.IsTrue(firstRead > 0, "Should successfully read with large buffer"); + Assert.IsTrue(firstRead <= bufferSize, "Should not read more than buffer size"); + + // Cleanup + stream.Dispose(); + } + + #endregion + + #region Stream Reading Behavior Tests - Partial Reads + + [TestMethod] + public async Task ReadAsync_PartialBufferFill_ReturnsAvailableData() + { + // Arrange - Request more data than available + var objectSize = 1024; + var requestedSize = 2048; // Request more than available + var testData = MultipartDownloadTestHelpers.GenerateTestData(objectSize, 0); + var mockResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse( + objectSize, null, null, "test-etag", testData); + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var transferConfig = new TransferUtilityConfig(); + var stream = BufferedMultipartStream.Create(mockClient.Object, request, transferConfig); + + await stream.InitializeAsync(CancellationToken.None); + + // Act + var buffer = new byte[requestedSize]; + var bytesRead = await stream.ReadAsync(buffer, 0, requestedSize); + + // Assert + Assert.AreEqual(objectSize, bytesRead, + "Should return available data, not requested amount"); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch(testData, buffer, 0, objectSize), + "Data should match original"); + + // Cleanup + stream.Dispose(); + } + + [TestMethod] + public async Task ReadAsync_ReadIntoMiddleOfBuffer_PositionsCorrectly() + { + // Arrange + var objectSize = 512; + var testData = MultipartDownloadTestHelpers.GenerateTestData(objectSize, 0); + var mockResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse( + objectSize, null, null, "test-etag", testData); + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = 
MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var transferConfig = new TransferUtilityConfig(); + var stream = BufferedMultipartStream.Create(mockClient.Object, request, transferConfig); + + await stream.InitializeAsync(CancellationToken.None); + + // Act - Read into middle of buffer + var buffer = new byte[1024]; + var offset = 256; + var count = 512; + var bytesRead = await stream.ReadAsync(buffer, offset, count); + + // Assert + Assert.AreEqual(objectSize, bytesRead, "Should read available data"); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch(testData, buffer, offset, objectSize), + "Data should be at correct offset in buffer"); + + // Cleanup + stream.Dispose(); + } + + #endregion + } +} diff --git a/sdk/test/Services/S3/UnitTests/Custom/BufferedPartDataHandlerTests.cs b/sdk/test/Services/S3/UnitTests/Custom/BufferedPartDataHandlerTests.cs new file mode 100644 index 000000000000..dcd9c2734cae --- /dev/null +++ b/sdk/test/Services/S3/UnitTests/Custom/BufferedPartDataHandlerTests.cs @@ -0,0 +1,585 @@ +using Amazon.S3.Model; +using Amazon.S3.Transfer; +using Amazon.S3.Transfer.Internal; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Moq; +using System; +using System.IO; +using System.Threading; +using System.Threading.Tasks; + +namespace AWSSDK.UnitTests +{ + [TestClass] + public class BufferedPartDataHandlerTests + { + #region Constructor Tests + + [TestMethod] + public void Constructor_WithValidParameters_CreatesHandler() + { + // Arrange + var mockBufferManager = new Mock(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + + // Act + var handler = new BufferedPartDataHandler(mockBufferManager.Object, config); + + // Assert + Assert.IsNotNull(handler); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentNullException))] + public void Constructor_WithNullBufferManager_ThrowsArgumentNullException() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + + // Act + var handler = new BufferedPartDataHandler(null, config); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentNullException))] + public void Constructor_WithNullConfig_ThrowsArgumentNullException() + { + // Arrange + var mockBufferManager = new Mock(); + + // Act + var handler = new BufferedPartDataHandler(mockBufferManager.Object, null); + } + + #endregion + + #region ProcessPartAsync Tests - Basic Functionality + + [TestMethod] + public async Task ProcessPartAsync_BuffersPartData() + { + // Arrange + var partSize = 8 * 1024 * 1024; // 8MB + var partData = new byte[partSize]; + new Random().NextBytes(partData); + + var mockBufferManager = new Mock(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var handler = new BufferedPartDataHandler(mockBufferManager.Object, config); + + var response = new GetObjectResponse + { + ContentLength = partSize, + ResponseStream = new MemoryStream(partData) + }; + + // Act + await handler.ProcessPartAsync(1, response, CancellationToken.None); + + // Assert - should add buffer to manager + mockBufferManager.Verify( + x => x.AddBufferAsync(It.IsAny(), It.IsAny()), + Times.Once); + } + + [TestMethod] + public async Task ProcessPartAsync_ReadsExactContentLength() + { + // Arrange + var partSize = 1024; + var partData = new byte[partSize]; + new Random().NextBytes(partData); + + StreamPartBuffer capturedBuffer = null; + var mockBufferManager = new Mock(); + mockBufferManager.Setup(x => x.AddBufferAsync(It.IsAny(), It.IsAny())) + 
.Callback((buffer, ct) => capturedBuffer = buffer) + .Returns(Task.CompletedTask); + + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var handler = new BufferedPartDataHandler(mockBufferManager.Object, config); + + var response = new GetObjectResponse + { + ContentLength = partSize, + ResponseStream = new MemoryStream(partData) + }; + + // Act + await handler.ProcessPartAsync(1, response, CancellationToken.None); + + // Assert + Assert.IsNotNull(capturedBuffer); + Assert.AreEqual(partSize, capturedBuffer.Length); + Assert.AreEqual(1, capturedBuffer.PartNumber); + } + + [TestMethod] + public async Task ProcessPartAsync_HandlesSmallPart() + { + // Arrange + var partSize = 100; // Very small + var partData = new byte[partSize]; + new Random().NextBytes(partData); + + StreamPartBuffer capturedBuffer = null; + var mockBufferManager = new Mock(); + mockBufferManager.Setup(x => x.AddBufferAsync(It.IsAny(), It.IsAny())) + .Callback((buffer, ct) => capturedBuffer = buffer) + .Returns(Task.CompletedTask); + + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var handler = new BufferedPartDataHandler(mockBufferManager.Object, config); + + var response = new GetObjectResponse + { + ContentLength = partSize, + ResponseStream = new MemoryStream(partData) + }; + + // Act + await handler.ProcessPartAsync(1, response, CancellationToken.None); + + // Assert + Assert.IsNotNull(capturedBuffer); + Assert.AreEqual(partSize, capturedBuffer.Length); + } + + [TestMethod] + public async Task ProcessPartAsync_HandlesLargePart() + { + // Arrange + var partSize = 16 * 1024 * 1024; // 16MB + var partData = new byte[partSize]; + new Random().NextBytes(partData); + + StreamPartBuffer capturedBuffer = null; + var mockBufferManager = new Mock(); + mockBufferManager.Setup(x => x.AddBufferAsync(It.IsAny(), It.IsAny())) + .Callback((buffer, ct) => capturedBuffer = buffer) + .Returns(Task.CompletedTask); + + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var handler = new BufferedPartDataHandler(mockBufferManager.Object, config); + + var response = new GetObjectResponse + { + ContentLength = partSize, + ResponseStream = new MemoryStream(partData) + }; + + // Act + await handler.ProcessPartAsync(1, response, CancellationToken.None); + + // Assert + Assert.IsNotNull(capturedBuffer); + Assert.AreEqual(partSize, capturedBuffer.Length); + } + + #endregion + + + #region ProcessPartAsync Tests - Data Integrity + + [TestMethod] + public async Task ProcessPartAsync_PreservesDataIntegrity() + { + // Arrange + var partSize = 1024 * 1024; // 1MB + var partData = new byte[partSize]; + new Random(42).NextBytes(partData); // Seeded for reproducibility + + StreamPartBuffer capturedBuffer = null; + var mockBufferManager = new Mock(); + mockBufferManager.Setup(x => x.AddBufferAsync(It.IsAny(), It.IsAny())) + .Callback((buffer, ct) => capturedBuffer = buffer) + .Returns(Task.CompletedTask); + + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var handler = new BufferedPartDataHandler(mockBufferManager.Object, config); + + var response = new GetObjectResponse + { + ContentLength = partSize, + ResponseStream = new MemoryStream(partData) + }; + + // Act + await handler.ProcessPartAsync(1, response, CancellationToken.None); + + // Assert - verify data matches exactly + Assert.IsNotNull(capturedBuffer); + var bufferedData = new byte[capturedBuffer.Length]; + Buffer.BlockCopy(capturedBuffer.ArrayPoolBuffer, 0, 
bufferedData, 0, capturedBuffer.Length); + + CollectionAssert.AreEqual(partData, bufferedData); + } + + [TestMethod] + public async Task ProcessPartAsync_HandlesZeroByteResponse() + { + // Arrange + var mockBufferManager = new Mock(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var handler = new BufferedPartDataHandler(mockBufferManager.Object, config); + + var response = new GetObjectResponse + { + ContentLength = 0, + ResponseStream = new MemoryStream(Array.Empty()) + }; + + // Act + await handler.ProcessPartAsync(1, response, CancellationToken.None); + + // Assert - should handle empty response gracefully + mockBufferManager.Verify( + x => x.AddBufferAsync(It.IsAny(), It.IsAny()), + Times.Once); + } + + [TestMethod] + public async Task ProcessPartAsync_WithUnexpectedEOF_ThrowsIOException() + { + // Arrange + var expectedBytes = 1024 * 1024; // 1MB expected + var actualBytes = 512 * 1024; // 512KB available (premature EOF) + var partData = new byte[actualBytes]; + new Random().NextBytes(partData); + + var mockBufferManager = new Mock(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var handler = new BufferedPartDataHandler(mockBufferManager.Object, config); + + // Create a response that promises more bytes than it delivers + var response = new GetObjectResponse + { + ContentLength = expectedBytes, // Promise 1MB + ResponseStream = new MemoryStream(partData) // Only deliver 512KB + }; + + // Act & Assert + var exception = await Assert.ThrowsExceptionAsync( + async () => await handler.ProcessPartAsync(1, response, CancellationToken.None)); + + // Verify exception message contains key information + StringAssert.Contains(exception.Message, "Unexpected end of stream"); + StringAssert.Contains(exception.Message, "part 1"); + StringAssert.Contains(exception.Message, expectedBytes.ToString()); + StringAssert.Contains(exception.Message, actualBytes.ToString()); + } + + [TestMethod] + public async Task ProcessPartAsync_WithUnexpectedEOF_DoesNotBufferPartialData() + { + // Arrange + var expectedBytes = 1024 * 1024; // 1MB expected + var actualBytes = 512 * 1024; // 512KB available (premature EOF) + var partData = new byte[actualBytes]; + new Random().NextBytes(partData); + + var mockBufferManager = new Mock(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var handler = new BufferedPartDataHandler(mockBufferManager.Object, config); + + var response = new GetObjectResponse + { + ContentLength = expectedBytes, + ResponseStream = new MemoryStream(partData) + }; + + // Act + try + { + await handler.ProcessPartAsync(1, response, CancellationToken.None); + Assert.Fail("Expected IOException was not thrown"); + } + catch (IOException) + { + // Expected + } + + // Assert - should NOT have added any buffer to manager since download failed + mockBufferManager.Verify( + x => x.AddBufferAsync(It.IsAny(), It.IsAny()), + Times.Never); + } + + #endregion + + #region ProcessPartAsync Tests - Cancellation + + [TestMethod] + [ExpectedException(typeof(TaskCanceledException))] + public async Task ProcessPartAsync_WithCancelledToken_ThrowsTaskCanceledException() + { + // Arrange + var partSize = 8 * 1024 * 1024; + var partData = new byte[partSize]; + + var mockBufferManager = new Mock(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var handler = new BufferedPartDataHandler(mockBufferManager.Object, config); + + var response = new GetObjectResponse + { + 
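+
+        // Note on the EOF tests above: the handler is expected to read exactly
+        // ContentLength bytes from the response stream before handing the buffer over.
+        // A minimal sketch of the kind of read loop being exercised (our illustration
+        // with assumed names, not the SDK's internal implementation):
+        //
+        //   int total = 0, read;
+        //   while (total < contentLength &&
+        //          (read = stream.Read(buffer, total, contentLength - total)) > 0)
+        //       total += read;
+        //   if (total < contentLength)
+        //       throw new IOException($"Unexpected end of stream for part {partNumber}");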
+
+        #region ProcessPartAsync Tests - Cancellation
+
+        [TestMethod]
+        [ExpectedException(typeof(TaskCanceledException))]
+        public async Task ProcessPartAsync_WithCancelledToken_ThrowsTaskCanceledException()
+        {
+            // Arrange
+            var partSize = 8 * 1024 * 1024;
+            var partData = new byte[partSize];
+
+            var mockBufferManager = new Mock<IStreamPartBufferManager>();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var handler = new BufferedPartDataHandler(mockBufferManager.Object, config);
+
+            var response = new GetObjectResponse
+            {
+                ContentLength = partSize,
+                ResponseStream = new MemoryStream(partData)
+            };
+
+            var cts = new CancellationTokenSource();
+            cts.Cancel();
+
+            // Act
+            await handler.ProcessPartAsync(1, response, cts.Token);
+        }
+
+        [TestMethod]
+        public async Task ProcessPartAsync_PassesCancellationTokenToBufferManager()
+        {
+            // Arrange
+            var partSize = 1024;
+            var partData = new byte[partSize];
+
+            CancellationToken capturedToken = default;
+            var mockBufferManager = new Mock<IStreamPartBufferManager>();
+            mockBufferManager.Setup(x => x.AddBufferAsync(It.IsAny<StreamPartBuffer>(), It.IsAny<CancellationToken>()))
+                .Callback<StreamPartBuffer, CancellationToken>((buffer, ct) => capturedToken = ct)
+                .Returns(Task.CompletedTask);
+
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var handler = new BufferedPartDataHandler(mockBufferManager.Object, config);
+
+            var response = new GetObjectResponse
+            {
+                ContentLength = partSize,
+                ResponseStream = new MemoryStream(partData)
+            };
+
+            var cts = new CancellationTokenSource();
+
+            // Act
+            await handler.ProcessPartAsync(1, response, cts.Token);
+
+            // Assert
+            Assert.AreEqual(cts.Token, capturedToken);
+        }
+
+        #endregion
+
+        #region WaitForCapacityAsync Tests
+
+        [TestMethod]
+        public async Task WaitForCapacityAsync_DelegatesToBufferManager()
+        {
+            // Arrange
+            var mockBufferManager = new Mock<IStreamPartBufferManager>();
+            mockBufferManager.Setup(x => x.WaitForBufferSpaceAsync(It.IsAny<CancellationToken>()))
+                .Returns(Task.CompletedTask);
+
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var handler = new BufferedPartDataHandler(mockBufferManager.Object, config);
+
+            // Act
+            await handler.WaitForCapacityAsync(CancellationToken.None);
+
+            // Assert
+            mockBufferManager.Verify(
+                x => x.WaitForBufferSpaceAsync(It.IsAny<CancellationToken>()),
+                Times.Once);
+        }
+
+        [TestMethod]
+        public async Task WaitForCapacityAsync_PassesCancellationToken()
+        {
+            // Arrange
+            CancellationToken capturedToken = default;
+            var mockBufferManager = new Mock<IStreamPartBufferManager>();
+            mockBufferManager.Setup(x => x.WaitForBufferSpaceAsync(It.IsAny<CancellationToken>()))
+                .Callback<CancellationToken>(ct => capturedToken = ct)
+                .Returns(Task.CompletedTask);
+
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var handler = new BufferedPartDataHandler(mockBufferManager.Object, config);
+
+            var cts = new CancellationTokenSource();
+
+            // Act
+            await handler.WaitForCapacityAsync(cts.Token);
+
+            // Assert
+            Assert.AreEqual(cts.Token, capturedToken);
+        }
+
+        [TestMethod]
+        [ExpectedException(typeof(OperationCanceledException))]
+        public async Task WaitForCapacityAsync_WhenCancelled_ThrowsOperationCanceledException()
+        {
+            // Arrange
+            var mockBufferManager = new Mock<IStreamPartBufferManager>();
+            mockBufferManager.Setup(x => x.WaitForBufferSpaceAsync(It.IsAny<CancellationToken>()))
+                .ThrowsAsync(new OperationCanceledException());
+
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var handler = new BufferedPartDataHandler(mockBufferManager.Object, config);
+
+            var cts = new CancellationTokenSource();
+            cts.Cancel();
+
+            // Act
+            await handler.WaitForCapacityAsync(cts.Token);
+        }
+
+        #endregion
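+
+        // The capacity tests in this file mirror the per-part lifecycle these mocks
+        // simulate: the coordinator awaits WaitForCapacityAsync before each part
+        // download, the handler buffers the part via ProcessPartAsync, the consumer
+        // later calls ReleaseCapacity as buffers drain, and OnDownloadComplete signals
+        // the end (with an exception on failure). This ordering is inferred from the
+        // calls verified here, not from separate SDK documentation.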
+
+        #region ReleaseCapacity Tests
+
+        [TestMethod]
+        public void ReleaseCapacity_DelegatesToBufferManager()
+        {
+            // Arrange
+            var mockBufferManager = new Mock<IStreamPartBufferManager>();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var handler = new BufferedPartDataHandler(mockBufferManager.Object, config);
+
+            // Act
+            handler.ReleaseCapacity();
+
+            // Assert
+            mockBufferManager.Verify(x => x.ReleaseBufferSpace(), Times.Once);
+        }
+
+        [TestMethod]
+        public void ReleaseCapacity_CanBeCalledMultipleTimes()
+        {
+            // Arrange
+            var mockBufferManager = new Mock<IStreamPartBufferManager>();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var handler = new BufferedPartDataHandler(mockBufferManager.Object, config);
+
+            // Act
+            handler.ReleaseCapacity();
+            handler.ReleaseCapacity();
+            handler.ReleaseCapacity();
+
+            // Assert
+            mockBufferManager.Verify(x => x.ReleaseBufferSpace(), Times.Exactly(3));
+        }
+
+        #endregion
+
+        #region OnDownloadComplete Tests
+
+        [TestMethod]
+        public void OnDownloadComplete_WithNullException_DelegatesToBufferManager()
+        {
+            // Arrange
+            var mockBufferManager = new Mock<IStreamPartBufferManager>();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var handler = new BufferedPartDataHandler(mockBufferManager.Object, config);
+
+            // Act
+            handler.OnDownloadComplete(null);
+
+            // Assert
+            mockBufferManager.Verify(
+                x => x.MarkDownloadComplete(null),
+                Times.Once);
+        }
+
+        [TestMethod]
+        public void OnDownloadComplete_WithException_PassesExceptionToBufferManager()
+        {
+            // Arrange
+            var testException = new InvalidOperationException("Test error");
+            Exception capturedEx = null;
+
+            var mockBufferManager = new Mock<IStreamPartBufferManager>();
+            mockBufferManager.Setup(x => x.MarkDownloadComplete(It.IsAny<Exception>()))
+                .Callback<Exception>(ex => capturedEx = ex);
+
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var handler = new BufferedPartDataHandler(mockBufferManager.Object, config);
+
+            // Act
+            handler.OnDownloadComplete(testException);
+
+            // Assert
+            Assert.AreEqual(testException, capturedEx);
+        }
+
+        [TestMethod]
+        public void OnDownloadComplete_WithCancelledException_PassesToBufferManager()
+        {
+            // Arrange
+            var testException = new OperationCanceledException();
+
+            var mockBufferManager = new Mock<IStreamPartBufferManager>();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var handler = new BufferedPartDataHandler(mockBufferManager.Object, config);
+
+            // Act
+            handler.OnDownloadComplete(testException);
+
+            // Assert
+            mockBufferManager.Verify(
+                x => x.MarkDownloadComplete(It.Is<Exception>(e => e == testException)),
+                Times.Once);
+        }
+
+        [TestMethod]
+        public void OnDownloadComplete_CanBeCalledMultipleTimes()
+        {
+            // Arrange
+            var mockBufferManager = new Mock<IStreamPartBufferManager>();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var handler = new BufferedPartDataHandler(mockBufferManager.Object, config);
+
+            // Act - calling multiple times should work
+            handler.OnDownloadComplete(null);
+            handler.OnDownloadComplete(new Exception("test"));
+            handler.OnDownloadComplete(null);
+
+            // Assert
+            mockBufferManager.Verify(
+                x => x.MarkDownloadComplete(It.IsAny<Exception>()),
+                Times.Exactly(3));
+        }
+
+        #endregion
+
+        #region Dispose Tests
+
+        [TestMethod]
+        public void Dispose_DoesNotDisposeBufferManager()
+        {
+            // Arrange
+            var mockBufferManager = new Mock<IStreamPartBufferManager>();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var handler = new BufferedPartDataHandler(mockBufferManager.Object, config);
+
+            // Act
+            handler.Dispose();
+
+            // Assert - BufferManager is owned by caller, should not be disposed
+            mockBufferManager.Verify(x => x.Dispose(), Times.Never);
+        }
+
+        [TestMethod]
+        public void Dispose_CanBeCalledMultipleTimes()
+        {
+            // Arrange
+            var mockBufferManager = new Mock<IStreamPartBufferManager>();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var handler = new BufferedPartDataHandler(mockBufferManager.Object, config);
+
+            // Act
+            handler.Dispose();
+            handler.Dispose(); // Should not throw
+
+            // Assert - no exception
+        }
+
+        #endregion
+    }
+}
diff --git a/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs b/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs
new file mode 100644
index 000000000000..41bb77ce57be
--- /dev/null
+++ b/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs
@@ -0,0 +1,1623 @@
+using Amazon.S3;
+using Amazon.S3.Model;
+using Amazon.S3.Transfer;
+using Amazon.S3.Transfer.Internal;
+using Microsoft.VisualStudio.TestTools.UnitTesting;
+using Moq;
+using System;
+using System.Buffers;
+using System.Collections.Generic;
+using System.IO;
+using System.Linq;
+using System.Threading;
+using System.Threading.Tasks;
+
+namespace AWSSDK.UnitTests
+{
+    [TestClass]
+    public class MultipartDownloadManagerTests
+    {
+        private Mock<IPartDataHandler> CreateMockDataHandler()
+        {
+            var mockHandler = new Mock<IPartDataHandler>();
+            mockHandler.Setup(x => x.ProcessPartAsync(It.IsAny<int>(), It.IsAny<GetObjectResponse>(), It.IsAny<CancellationToken>()))
+                .Returns(Task.CompletedTask);
+            mockHandler.Setup(x => x.WaitForCapacityAsync(It.IsAny<CancellationToken>()))
+                .Returns(Task.CompletedTask);
+            mockHandler.Setup(x => x.ReleaseCapacity());
+            mockHandler.Setup(x => x.OnDownloadComplete(It.IsAny<Exception>()));
+            return mockHandler;
+        }
+
+        #region Constructor Tests
+
+        [TestMethod]
+        public void Constructor_WithValidParameters_CreatesCoordinator()
+        {
+            // Arrange
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client();
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var mockDataHandler = CreateMockDataHandler();
+
+            // Act
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object);
+
+            // Assert
+            Assert.IsNotNull(coordinator);
+            Assert.IsNull(coordinator.DownloadException);
+        }
+
+        [DataTestMethod]
+        [DataRow(true, false, false, false, DisplayName = "Null S3Client")]
+        [DataRow(false, true, false, false, DisplayName = "Null Request")]
+        [DataRow(false, false, true, false, DisplayName = "Null Config")]
+        [ExpectedException(typeof(ArgumentNullException))]
+        public void Constructor_WithNullParameter_ThrowsArgumentNullException(
+            bool nullClient, bool nullRequest, bool nullConfig, bool nullHandler)
+        {
+            // Arrange
+            var client = nullClient ? null : MultipartDownloadTestHelpers.CreateMockS3Client().Object;
+            var request = nullRequest ? null : MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var config = nullConfig ? null : MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var handler = nullHandler ? null : CreateMockDataHandler().Object;
+
+            // Act
+            var coordinator = new MultipartDownloadManager(client, request, config, handler);
+        }
+
+        #endregion
+
+        #region Property Tests
+
+        [TestMethod]
+        public void DownloadException_InitiallyNull()
+        {
+            // Arrange
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client();
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            // Act
+            var exception = coordinator.DownloadException;
+
+            // Assert
+            Assert.IsNull(exception);
+        }
+
+        #endregion
+
+        #region Discovery - PART Strategy - Single Part Tests
+
+        [TestMethod]
+        public async Task DiscoverUsingPartStrategy_WithNullPartsCount_ReturnsSinglePart()
+        {
+            // Arrange
+            var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse(
+                objectSize: 1024 * 1024,
+                eTag: "single-part-etag");
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(
+                (req, ct) => Task.FromResult(mockResponse));
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.PART);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            // Act
+            var result = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None);
+
+            // Assert
+            Assert.IsNotNull(result);
+            Assert.AreEqual(1, result.TotalParts);
+            Assert.AreEqual(1024 * 1024, result.ObjectSize);
+            Assert.IsNotNull(result.InitialResponse);
+        }
+
+        [TestMethod]
+        public async Task DiscoverUsingPartStrategy_WithPartsCountOne_ReturnsSinglePart()
+        {
+            // Arrange
+            var mockResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse(
+                contentLength: 1024 * 1024,
+                partsCount: 1,
+                contentRange: null,
+                eTag: "single-part-etag");
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(
+                (req, ct) => Task.FromResult(mockResponse));
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.PART);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            // Act
+            var result = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None);
+
+            // Assert
+            Assert.AreEqual(1, result.TotalParts);
+            Assert.IsNotNull(result.InitialResponse);
+        }
+
+        [TestMethod]
+        public async Task DiscoverUsingPartStrategy_SinglePart_DoesNotBufferFirstPart()
+        {
+            // Arrange
+            var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse(objectSize: 1024);
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(
+                (req, ct) => Task.FromResult(mockResponse));
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.PART);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            // Act
+            var result = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None);
+
+            // Assert - Single-part does not buffer during discovery
+            Assert.IsNotNull(result.InitialResponse);
+        }
+
+        #endregion
+
+        #region Discovery - PART Strategy - Multipart Tests
+
+        [TestMethod]
+        public async Task DiscoverUsingPartStrategy_WithMultipleParts_ReturnsMultipart()
+        {
+            // Arrange
+            var totalObjectSize = 50 * 1024 * 1024; // 50MB
+            var partSize = 10 * 1024 * 1024; // 10MB
+            var totalParts = 5;
+
+            var mockResponse = MultipartDownloadTestHelpers.CreateMultipartFirstPartResponse(
+                partSize, totalParts, totalObjectSize, "multipart-etag");
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(
+                (req, ct) => Task.FromResult(mockResponse));
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.PART);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            // Act
+            var result = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None);
+
+            // Assert
+            Assert.AreEqual(5, result.TotalParts);
+            Assert.AreEqual(totalObjectSize, result.ObjectSize);
+            Assert.IsNotNull(result.InitialResponse);
+        }
+
+        [TestMethod]
+        public async Task DiscoverUsingPartStrategy_Multipart_BuffersFirstPart()
+        {
+            // Arrange
+            var totalObjectSize = 50 * 1024 * 1024;
+            var partSize = 10 * 1024 * 1024;
+            var totalParts = 5;
+
+            var mockResponse = MultipartDownloadTestHelpers.CreateMultipartFirstPartResponse(
+                partSize, totalParts, totalObjectSize, "multipart-etag");
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(
+                (req, ct) => Task.FromResult(mockResponse));
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.PART);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            // Act
+            var result = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None);
+
+            // Assert - Multipart returns response with stream for buffering in StartDownloadsAsync
+            Assert.IsNotNull(result.InitialResponse);
+        }
+
+        [TestMethod]
+        public async Task DiscoverUsingPartStrategy_SavesETag()
+        {
+            // Arrange
+            var mockResponse = MultipartDownloadTestHelpers.CreateMultipartFirstPartResponse(
+                8 * 1024 * 1024, 5, 40 * 1024 * 1024, "saved-etag");
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(
+                (req, ct) => Task.FromResult(mockResponse));
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.PART);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            // Act
+            var result = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None);
+
+            // Assert - ETag is saved internally (verified through subsequent validation)
+            Assert.IsNotNull(result);
+        }
+
+        [TestMethod]
+        public async Task DiscoverUsingPartStrategy_ParsesContentRange()
+        {
+            // Arrange
+            var totalObjectSize = 52428800; // 50MB
+            var partSize = 8388608; // 8MB
+            var contentRange = $"bytes 0-{partSize - 1}/{totalObjectSize}";
+
+            var mockResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse(
+                contentLength: partSize,
+                partsCount: 7,
+                contentRange: contentRange,
+                eTag: "test-etag");
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(
+                (req, ct) => Task.FromResult(mockResponse));
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.PART);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            // Act
+            var result = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None);
+
+            // Assert
+            Assert.AreEqual(totalObjectSize, result.ObjectSize);
+        }
+
+        [TestMethod]
+        [ExpectedException(typeof(InvalidOperationException))]
+        public async Task DiscoverUsingPartStrategy_WithInvalidContentRange_ThrowsException()
+        {
+            // Arrange
+            var mockResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse(
+                contentLength: 8 * 1024 * 1024,
+                partsCount: 5,
+                contentRange: "invalid-format",
+                eTag: "test-etag");
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(
+                (req, ct) => Task.FromResult(mockResponse));
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.PART);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            // Act
+            await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None);
+        }
+
+        #endregion
+
+        #region Discovery - RANGE Strategy - Small Object Tests
+
+        [TestMethod]
+        public async Task DiscoverUsingRangeStrategy_SmallObject_ReturnsSinglePart()
+        {
+            // Arrange
+            var objectSize = 1024 * 1024; // 1MB
+
+            var mockResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse(
+                contentLength: objectSize,
+                partsCount: null,
+                contentRange: null, // No ContentRange means entire small object
+                eTag: "small-object-etag");
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(
+                (req, ct) => Task.FromResult(mockResponse));
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.RANGE);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            // Act
+            var result = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None);
+
+            // Assert
+            Assert.AreEqual(1, result.TotalParts);
+            Assert.AreEqual(objectSize, result.ObjectSize);
+            Assert.IsNotNull(result.InitialResponse);
+        }
+
+        #endregion
+
+        #region Discovery - RANGE Strategy - Single Part from Range Tests
+
+        [TestMethod]
+        public async Task DiscoverUsingRangeStrategy_SinglePartRange_ReturnsSinglePart()
+        {
+            // Arrange
+            var objectSize = 5 * 1024 * 1024; // 5MB
+            var contentRange = $"bytes 0-{objectSize - 1}/{objectSize}";
+
+            var mockResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse(
+                contentLength: objectSize,
+                partsCount: null,
+                contentRange: contentRange,
+                eTag: "single-range-etag");
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(
+                (req, ct) => Task.FromResult(mockResponse));
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.RANGE);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            // Act
+            var result = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None);
+
+            // Assert
+            Assert.AreEqual(1, result.TotalParts);
+            Assert.IsNotNull(result.InitialResponse);
+        }
+
+        #endregion
+
+        #region Discovery - RANGE Strategy - Multipart Tests
+
+        [TestMethod]
+        public async Task DiscoverUsingRangeStrategy_Multipart_ReturnsMultipart()
+        {
+            // Arrange
+            var totalObjectSize = 52428800; // 50MB
+            var partSize = 8388608; // 8MB
+            var contentRange = $"bytes 0-{partSize - 1}/{totalObjectSize}";
+
+            var mockResponse = MultipartDownloadTestHelpers.CreateRangeResponse(
+                0, partSize - 1, totalObjectSize, "range-etag");
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(
+                (req, ct) => Task.FromResult(mockResponse));
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                partSize: partSize,
+                downloadType: MultipartDownloadType.RANGE);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            // Act
+            var result = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None);
+
+            // Assert
+            Assert.AreEqual(7, result.TotalParts); // 52428800 / 8388608 = 6.25 -> 7 parts
+            Assert.IsNotNull(result.InitialResponse);
+        }
+
+        [TestMethod]
+        [ExpectedException(typeof(InvalidOperationException))]
+        public async Task DiscoverUsingRangeStrategy_Multipart_ValidatesContentLength()
+        {
+            // Arrange
+            var totalObjectSize = 50 * 1024 * 1024;
+            var partSize = 8 * 1024 * 1024;
+            var wrongPartSize = 5 * 1024 * 1024; // ContentLength doesn't match requested part size
+
+            var mockResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse(
+                contentLength: wrongPartSize,
+                partsCount: null,
+                contentRange: $"bytes 0-{wrongPartSize - 1}/{totalObjectSize}",
+                eTag: "range-etag");
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(
+                (req, ct) => Task.FromResult(mockResponse));
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                partSize: partSize,
+                downloadType: MultipartDownloadType.RANGE);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            // Act
+            await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None);
+        }
+
+        [TestMethod]
+        public async Task DiscoverUsingRangeStrategy_SavesETag()
+        {
+            // Arrange
+            var mockResponse = MultipartDownloadTestHelpers.CreateRangeResponse(
+                0, 8 * 1024 * 1024 - 1, 50 * 1024 * 1024, "saved-range-etag");
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(
+                (req, ct) => Task.FromResult(mockResponse));
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                partSize: 8 * 1024 * 1024,
+                downloadType: MultipartDownloadType.RANGE);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            // Act
+            var result = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None);
+
+            // Assert - ETag is saved internally
+            Assert.IsNotNull(result);
+        }
+
+        [TestMethod]
+        public async Task DiscoverUsingRangeStrategy_CalculatesPartCount()
+        {
+            // Arrange
+            var totalObjectSize = 52428800; // 50MB
+            var partSize = 8388608; // 8MB
+
+            var mockResponse = MultipartDownloadTestHelpers.CreateRangeResponse(
+                0, partSize - 1, totalObjectSize, "range-etag");
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(
+                (req, ct) => Task.FromResult(mockResponse));
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                partSize: partSize,
+                downloadType: MultipartDownloadType.RANGE);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            // Act
+            var result = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None);
+
+            // Assert
+            Assert.AreEqual(7, result.TotalParts); // Ceiling(52428800 / 8388608) = 7
+        }
+
+        #endregion
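+
+        // The RANGE-strategy part count asserted above is effectively
+        // Ceiling(objectSize / partSize). A worked sketch of the arithmetic the
+        // assertions rely on (illustration only, not the manager's internal code):
+        //
+        //   long totalParts = (objectSize + partSize - 1) / partSize;
+        //   // 52428800 bytes / 8388608-byte parts -> 6 full parts + 2097152 bytes left -> 7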
+
+        #region StartDownloadsAsync Tests - Setup
+
+        [TestMethod]
+        public async Task StartDownloadsAsync_SinglePart_ReturnsImmediately()
+        {
+            // Arrange
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client();
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            var discoveryResult = new DownloadDiscoveryResult
+            {
+                TotalParts = 1,
+                ObjectSize = 1024,
+                InitialResponse = new GetObjectResponse()
+            };
+
+            var mockBufferManager = new Mock<IStreamPartBufferManager>();
+
+            // Act
+            await coordinator.StartDownloadsAsync(discoveryResult, CancellationToken.None);
+
+            // Assert - should complete without any downloads
+            mockClient.Verify(x => x.GetObjectAsync(It.IsAny<GetObjectRequest>(), It.IsAny<CancellationToken>()), Times.Never);
+        }
+
+        [TestMethod]
+        [ExpectedException(typeof(ArgumentNullException))]
+        public async Task StartDownloadsAsync_WithNullDiscoveryResult_ThrowsArgumentNullException()
+        {
+            // Arrange
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client();
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            // Act
+            await coordinator.StartDownloadsAsync(null, CancellationToken.None);
+        }
+
+        #endregion
+
+        #region Validation Tests
+
+        [DataTestMethod]
+        [DataRow(MultipartDownloadTestHelpers.ValidationFailureType.MissingContentRange, DisplayName = "Missing ContentRange")]
+        [DataRow(MultipartDownloadTestHelpers.ValidationFailureType.InvalidContentRangeFormat, DisplayName = "Invalid ContentRange Format")]
+        [DataRow(MultipartDownloadTestHelpers.ValidationFailureType.UnparseableRange, DisplayName = "Unparseable Range")]
+        [DataRow(MultipartDownloadTestHelpers.ValidationFailureType.RangeMismatch, DisplayName = "Range Mismatch")]
+        [DataRow(MultipartDownloadTestHelpers.ValidationFailureType.ETagMismatch, DisplayName = "ETag Mismatch")]
+        [ExpectedException(typeof(InvalidOperationException))]
+        public async Task Validation_Failures_ThrowInvalidOperationException(
+            MultipartDownloadTestHelpers.ValidationFailureType failureType)
+        {
+            // Arrange
+            var mockClient = MultipartDownloadTestHelpers.CreateMockClientWithValidationFailure(failureType);
+            var coordinator = MultipartDownloadTestHelpers.CreateCoordinatorForValidationTest(mockClient.Object, failureType);
+            var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None);
+
+            // Act & Assert (exception expected via attribute)
+            await coordinator.StartDownloadsAsync(discoveryResult, CancellationToken.None);
+            await coordinator.DownloadCompletionTask; // Wait for background task to observe exceptions
+        }
+
+        [TestMethod]
+        public async Task Validation_ETag_Matching_Succeeds()
+        {
+            // Arrange - All parts have consistent ETag
+            var totalParts = 2;
+            var partSize = 8 * 1024 * 1024;
+            var totalObjectSize = totalParts * partSize;
+            var consistentETag = "consistent-etag";
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart(
+                totalParts, partSize, totalObjectSize, consistentETag, usePartStrategy: true);
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.PART);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1);
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None);
+
+            // Act - should succeed with matching ETags
+            await coordinator.StartDownloadsAsync(discoveryResult, CancellationToken.None);
+
+            // Assert - no exception thrown
+        }
+
+        [TestMethod]
+        public async Task Validation_ContentRange_ValidRange_Succeeds()
+        {
+            // Arrange - RANGE strategy with correct ContentRange
+            var totalObjectSize = 20 * 1024 * 1024;
+            var partSize = 8 * 1024 * 1024;
+
+            // All three parts have correct ranges
+            var firstPartResponse = MultipartDownloadTestHelpers.CreateRangeResponse(
+                0, partSize - 1, totalObjectSize, "test-etag");
+
+            var secondPartResponse = MultipartDownloadTestHelpers.CreateRangeResponse(
+                partSize, 2 * partSize - 1, totalObjectSize, "test-etag");
+
+            var thirdPartResponse = MultipartDownloadTestHelpers.CreateRangeResponse(
+                2 * partSize, totalObjectSize - 1, totalObjectSize, "test-etag");
+
+            int callCount = 0;
+            var mockClient = new Mock<IAmazonS3>();
+            mockClient.Setup(x => x.GetObjectAsync(It.IsAny<GetObjectRequest>(), It.IsAny<CancellationToken>()))
+                .Returns(() =>
+                {
+                    callCount++;
+                    if (callCount == 1) return Task.FromResult(firstPartResponse);
+                    if (callCount == 2) return Task.FromResult(secondPartResponse);
+                    return Task.FromResult(thirdPartResponse);
+                });
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                partSize: partSize,
+                downloadType: MultipartDownloadType.RANGE);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1);
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None);
+
+            // Act - should succeed with valid ranges
+            await coordinator.StartDownloadsAsync(discoveryResult, CancellationToken.None);
+
+            // Assert - no exception thrown
+        }
+
+        #endregion
+
+        #region Disposal Tests
+
+        [TestMethod]
+        public void Dispose_MultipleCalls_IsIdempotent()
+        {
+            // Arrange
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client();
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            // Act
+            coordinator.Dispose();
+            coordinator.Dispose(); // Second call should not throw
+
+            // Assert - no exception thrown
+        }
+
+        [TestMethod]
+        [ExpectedException(typeof(ObjectDisposedException))]
+        public async Task Operations_AfterDispose_ThrowObjectDisposedException()
+        {
+            // Arrange
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client();
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            // Act
+            coordinator.Dispose();
+            await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None);
+        }
+
+        #endregion
+
+        #region Cancellation Token Tests
+
+        [TestMethod]
+        [ExpectedException(typeof(OperationCanceledException))]
+        public async Task DiscoverDownloadStrategyAsync_WhenCancelled_ThrowsOperationCanceledException()
+        {
+            // Arrange
+            var mockClient = new Mock<IAmazonS3>();
+            mockClient.Setup(x => x.GetObjectAsync(It.IsAny<GetObjectRequest>(), It.IsAny<CancellationToken>()))
+                .ThrowsAsync(new OperationCanceledException());
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            var cts = new CancellationTokenSource();
+            cts.Cancel();
+
+            // Act
+            await coordinator.DiscoverDownloadStrategyAsync(cts.Token);
+        }
+
+        [TestMethod]
+        public async Task DiscoverDownloadStrategyAsync_WhenCancelled_SetsDownloadException()
+        {
+            // Arrange
+            var mockClient = new Mock<IAmazonS3>();
+            var cancelledException = new OperationCanceledException();
+            mockClient.Setup(x => x.GetObjectAsync(It.IsAny<GetObjectRequest>(), It.IsAny<CancellationToken>()))
+                .ThrowsAsync(cancelledException);
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            var cts = new CancellationTokenSource();
+            cts.Cancel();
+
+            // Act
+            try
+            {
+                await coordinator.DiscoverDownloadStrategyAsync(cts.Token);
+            }
+            catch (OperationCanceledException)
+            {
+                // Expected
+            }
+
+            // Assert
+            Assert.IsNotNull(coordinator.DownloadException);
+            Assert.IsInstanceOfType(coordinator.DownloadException, typeof(OperationCanceledException));
+        }
+
+        [TestMethod]
+        public async Task DiscoverDownloadStrategyAsync_PassesCancellationTokenToS3Client()
+        {
+            // Arrange
+            CancellationToken capturedToken = default;
+            var mockClient = new Mock<IAmazonS3>();
+            mockClient.Setup(x => x.GetObjectAsync(It.IsAny<GetObjectRequest>(), It.IsAny<CancellationToken>()))
+                .Callback<GetObjectRequest, CancellationToken>((req, ct) => capturedToken = ct)
+                .ReturnsAsync(MultipartDownloadTestHelpers.CreateSinglePartResponse(1024));
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            var cts = new CancellationTokenSource();
+
+            // Act
+            await coordinator.DiscoverDownloadStrategyAsync(cts.Token);
+
+            // Assert
+            Assert.AreEqual(cts.Token, capturedToken);
+        }
+
+        [TestMethod]
+        [ExpectedException(typeof(OperationCanceledException))]
+        public async Task StartDownloadsAsync_WhenCancelledBeforeStart_ThrowsOperationCanceledException()
+        {
+            // Arrange
+            var totalParts = 3;
+            var partSize = 8 * 1024 * 1024;
+            var totalObjectSize = totalParts * partSize;
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart(
+                totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true);
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.PART);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None);
+
+            var cts = new CancellationTokenSource();
+            cts.Cancel();
+
+            // Act
+            await coordinator.StartDownloadsAsync(discoveryResult, cts.Token);
+            await coordinator.DownloadCompletionTask; // Wait for background task to observe exceptions
+        }
+
+        [TestMethod]
+        public async Task StartDownloadsAsync_WhenCancelledDuringDownloads_NotifiesBufferManager()
+        {
+            // Arrange
+            var totalParts = 3;
+            var partSize = 8 * 1024 * 1024;
+            var totalObjectSize = totalParts * partSize;
+
+            var callCount = 0;
+            var mockClient = new Mock<IAmazonS3>();
+            mockClient.Setup(x => x.GetObjectAsync(It.IsAny<GetObjectRequest>(), It.IsAny<CancellationToken>()))
+                .Returns(() =>
+                {
+                    callCount++;
+                    if (callCount == 1)
+                    {
+                        // First call (discovery) succeeds
+                        return Task.FromResult(MultipartDownloadTestHelpers.CreateMultipartFirstPartResponse(
+                            partSize, totalParts, totalObjectSize, "test-etag"));
+                    }
+                    else
+                    {
+                        // Subsequent calls (downloads) throw cancellation
+                        throw new OperationCanceledException();
+                    }
+                });
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.PART);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1);
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None);
+
+            // Act
+            try
+            {
+                await coordinator.StartDownloadsAsync(discoveryResult, CancellationToken.None);
+                await coordinator.DownloadCompletionTask; // Wait for background task to observe exceptions
+            }
+            catch (OperationCanceledException)
+            {
+                // Expected
+            }
+
+            // Assert
+            Assert.IsNotNull(coordinator.DownloadException);
+            Assert.IsInstanceOfType(coordinator.DownloadException, typeof(OperationCanceledException));
+        }
+
+        [TestMethod]
+        public async Task StartDownloadsAsync_WhenCancelled_SetsDownloadException()
+        {
+            // Arrange
+            var totalParts = 3;
+            var partSize = 8 * 1024 * 1024;
+            var totalObjectSize = totalParts * partSize;
+
+            var callCount = 0;
+            var mockClient = new Mock<IAmazonS3>();
+            mockClient.Setup(x => x.GetObjectAsync(It.IsAny<GetObjectRequest>(), It.IsAny<CancellationToken>()))
+                .Returns(() =>
+                {
+                    callCount++;
+                    if (callCount == 1)
+                    {
+                        return Task.FromResult(MultipartDownloadTestHelpers.CreateMultipartFirstPartResponse(
+                            partSize, totalParts, totalObjectSize, "test-etag"));
+                    }
+                    throw new OperationCanceledException();
+                });
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.PART);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1);
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None);
+
+            // Act
+            try
+            {
+                await coordinator.StartDownloadsAsync(discoveryResult, CancellationToken.None);
+                await coordinator.DownloadCompletionTask; // Wait for background task to observe exceptions
+            }
+            catch (OperationCanceledException)
+            {
+                // Expected
+            }
+
+            // Assert
+            Assert.IsNotNull(coordinator.DownloadException);
+            Assert.IsInstanceOfType(coordinator.DownloadException, typeof(OperationCanceledException));
+        }
+
+        [TestMethod]
+        public async Task StartDownloadsAsync_PassesCancellationTokenToBufferManager()
+        {
+            // Arrange
+            var totalParts = 2;
+            var partSize = 8 * 1024 * 1024;
+            var totalObjectSize = totalParts * partSize;
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart(
+                totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true);
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.PART);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None);
+
+            var cts = new CancellationTokenSource();
+
+            // Act
+            await coordinator.StartDownloadsAsync(discoveryResult, cts.Token);
+
+            // Assert - The cancellation token was passed through to the data handler
+            Assert.IsNotNull(discoveryResult);
+        }
+
+        [TestMethod]
+        public async Task StartDownloadsAsync_SinglePart_DoesNotThrowOnCancellation()
+        {
+            // Arrange - Single part download should return immediately without using cancellation token
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client();
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            var discoveryResult = new DownloadDiscoveryResult
+            {
+                TotalParts = 1,
+                ObjectSize = 1024,
+                InitialResponse = new GetObjectResponse()
+            };
+
+            var cts = new CancellationTokenSource();
+            cts.Cancel();
+
+            // Act - should complete without throwing even though token is cancelled
+            await coordinator.StartDownloadsAsync(discoveryResult, cts.Token);
+
+            // Assert - no exception thrown, no S3 calls made
+            mockClient.Verify(x => x.GetObjectAsync(It.IsAny<GetObjectRequest>(), It.IsAny<CancellationToken>()), Times.Never);
+        }
+
MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + + // Act + try + { + await coordinator.StartDownloadsAsync(discoveryResult, CancellationToken.None); + await coordinator.DownloadCompletionTask; // Wait for background task to observe exceptions + } + catch (OperationCanceledException) + { + // Expected + } + + // Assert - Error should be captured + Assert.IsNotNull(coordinator.DownloadException); + } + + [TestMethod] + public async Task Coordinator_CanBeDisposedAfterCancellation() + { + // Arrange + var mockClient = new Mock(); + mockClient.Setup(x => x.GetObjectAsync(It.IsAny(), It.IsAny())) + .ThrowsAsync(new OperationCanceledException()); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + var cts = new CancellationTokenSource(); + cts.Cancel(); + + // Act + try + { + await coordinator.DiscoverDownloadStrategyAsync(cts.Token); + } + catch (OperationCanceledException) + { + // Expected + } + + // Dispose should not throw + coordinator.Dispose(); + + // Assert - Multiple disposes should also work + coordinator.Dispose(); + } + + [TestMethod] + [ExpectedException(typeof(OperationCanceledException))] + public async Task StartDownloadsAsync_RangeStrategy_CancellationDuringDownloads() + { + // Arrange - RANGE strategy cancellation + var totalObjectSize = 20 * 1024 * 1024; + var partSize = 8 * 1024 * 1024; + + var callCount = 0; + var mockClient = new Mock(); + mockClient.Setup(x => x.GetObjectAsync(It.IsAny(), It.IsAny())) + .Returns(() => + { + callCount++; + if (callCount == 1) + { + // Discovery succeeds + return Task.FromResult(MultipartDownloadTestHelpers.CreateRangeResponse( + 0, partSize - 1, totalObjectSize, "test-etag")); + } + // Part 2 download throws cancellation + throw new OperationCanceledException(); + }); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + partSize: partSize, + downloadType: MultipartDownloadType.RANGE); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + + // Act + await coordinator.StartDownloadsAsync(discoveryResult, CancellationToken.None); + await coordinator.DownloadCompletionTask; // Wait for background task to observe exceptions + } + + #endregion + + #region Deadlock Prevention Tests + + [TestMethod] + public async Task StartDownloadsAsync_ReturnsImmediately_PreventsDeadlock() + { + // Arrange - Create a scenario where buffer would fill during downloads + var totalParts = 5; + var partSize = 8 * 1024 * 1024; + var totalObjectSize = totalParts * partSize; + + // Track download state + var downloadsStarted = new System.Collections.Concurrent.ConcurrentBag(); + var bufferBlockingStarted = new TaskCompletionSource(); + + var mockDataHandler = new Mock(); + + // Simulate WaitForCapacityAsync being called (downloads are actively buffering) + mockDataHandler + .Setup(x => x.WaitForCapacityAsync(It.IsAny())) + .Returns(() => + { + bufferBlockingStarted.TrySetResult(true); + // Return immediately to allow 
downloads to proceed + return Task.CompletedTask; + }); + + mockDataHandler + .Setup(x => x.ProcessPartAsync(It.IsAny(), It.IsAny(), It.IsAny())) + .Callback((partNum, _, __) => + { + downloadsStarted.Add(partNum); + }) + .Returns(Task.CompletedTask); + + mockDataHandler + .Setup(x => x.OnDownloadComplete(It.IsAny())); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( + totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: MultipartDownloadType.PART); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 2); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object); + + var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + + // Act - StartDownloadsAsync should return immediately (not wait for all downloads) + var stopwatch = System.Diagnostics.Stopwatch.StartNew(); + await coordinator.StartDownloadsAsync(discoveryResult, CancellationToken.None); + stopwatch.Stop(); + + // Assert - StartDownloadsAsync should return almost immediately + // The key is it returns BEFORE all downloads complete, allowing consumer to start reading + Assert.IsTrue(stopwatch.ElapsedMilliseconds < 1000, + $"StartDownloadsAsync should return immediately, took {stopwatch.ElapsedMilliseconds}ms"); + + // Verify Part 1 was processed (synchronously during StartDownloadsAsync) + Assert.IsTrue(downloadsStarted.Contains(1), "Part 1 should be processed synchronously"); + + // Wait for background downloads to start + var bufferCalledTask = Task.WhenAny(bufferBlockingStarted.Task, Task.Delay(2000)); + await bufferCalledTask; + Assert.IsTrue(bufferBlockingStarted.Task.IsCompleted, + "Background downloads should have started after StartDownloadsAsync returned"); + + // Verify DownloadCompletionTask exists and is for background work + Assert.IsNotNull(coordinator.DownloadCompletionTask, + "DownloadCompletionTask should be set for multipart downloads"); + + // Wait for all background downloads to complete + await coordinator.DownloadCompletionTask; + + // Verify all parts were eventually processed + Assert.AreEqual(totalParts, downloadsStarted.Count, + "All parts should be processed in background"); + } + + [TestMethod] + public async Task StartDownloadsAsync_SinglePart_ReturnsImmediatelyWithoutBackgroundTask() + { + // Arrange - Single-part downloads should not create background tasks + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + + var mockDataHandler = CreateMockDataHandler(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object); + + var discoveryResult = new DownloadDiscoveryResult + { + TotalParts = 1, + ObjectSize = 1024, + InitialResponse = new GetObjectResponse() + }; + + // Act + var stopwatch = System.Diagnostics.Stopwatch.StartNew(); + await coordinator.StartDownloadsAsync(discoveryResult, CancellationToken.None); + stopwatch.Stop(); + + // DownloadCompletionTask should be completed immediately (no background work) + Assert.IsTrue(coordinator.DownloadCompletionTask.IsCompleted, + "DownloadCompletionTask should be completed for single-part downloads"); + + // Verify OnDownloadComplete was called + 
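+ // (a null argument is assumed here to mean the download completed without an error)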
mockDataHandler.Verify(x => x.OnDownloadComplete(null), Times.Once); + } + + #endregion + + #region ContentRange and Part Range Calculation Tests + + [TestMethod] + public void ParseContentRange_ValidFormat_ReturnsCorrectValues() + { + // Arrange + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + var contentRange = "bytes 0-8388607/52428800"; + + // Act + var (startByte, endByte, totalSize) = coordinator.ParseContentRange(contentRange); + + // Assert + Assert.AreEqual(0L, startByte); + Assert.AreEqual(8388607L, endByte); + Assert.AreEqual(52428800L, totalSize); + } + + [TestMethod] + public void ParseContentRange_SingleByteRange_ReturnsCorrectValues() + { + // Arrange + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + var contentRange = "bytes 0-0/1"; + + // Act + var (startByte, endByte, totalSize) = coordinator.ParseContentRange(contentRange); + + // Assert + Assert.AreEqual(0L, startByte); + Assert.AreEqual(0L, endByte); + Assert.AreEqual(1L, totalSize); + } + + [TestMethod] + public void ParseContentRange_LargeFileLastPart_ReturnsCorrectValues() + { + // Arrange + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + var contentRange = "bytes 50331648-52428799/52428800"; + + // Act + var (startByte, endByte, totalSize) = coordinator.ParseContentRange(contentRange); + + // Assert + Assert.AreEqual(50331648L, startByte); + Assert.AreEqual(52428799L, endByte); + Assert.AreEqual(52428800L, totalSize); + } + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public void ParseContentRange_NullContentRange_ThrowsInvalidOperationException() + { + // Arrange + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + // Act + coordinator.ParseContentRange(null); + } + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public void ParseContentRange_EmptyContentRange_ThrowsInvalidOperationException() + { + // Arrange + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + // Act + coordinator.ParseContentRange(string.Empty); + } + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public void 
ParseContentRange_InvalidFormat_NoSlash_ThrowsInvalidOperationException() + { + // Arrange + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + // Act + coordinator.ParseContentRange("bytes 0-1000"); + } + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public void ParseContentRange_InvalidFormat_NoDash_ThrowsInvalidOperationException() + { + // Arrange + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + // Act + coordinator.ParseContentRange("bytes 01000/5000"); + } + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public void ParseContentRange_InvalidFormat_NonNumericRange_ThrowsInvalidOperationException() + { + // Arrange + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + // Act + coordinator.ParseContentRange("bytes abc-def/5000"); + } + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public void ParseContentRange_WildcardTotalSize_ThrowsInvalidOperationExceptionWithMessage() + { + // Arrange + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + // Act & Assert + try + { + coordinator.ParseContentRange("bytes 0-1000/*"); + Assert.Fail("Expected InvalidOperationException was not thrown"); + } + catch (InvalidOperationException ex) + { + Assert.IsTrue(ex.Message.Contains("Unexpected wildcard")); + Assert.IsTrue(ex.Message.Contains("S3 always returns exact object sizes")); + throw; + } + } + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public void ParseContentRange_NonNumericTotalSize_ThrowsInvalidOperationException() + { + // Arrange + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + // Act + coordinator.ParseContentRange("bytes 0-1000/abc"); + } + + [TestMethod] + public void ExtractTotalSizeFromContentRange_ValidFormat_ReturnsTotalSize() + { + // Arrange + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new 
MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + var contentRange = "bytes 0-8388607/52428800"; + + // Act + var totalSize = coordinator.ExtractTotalSizeFromContentRange(contentRange); + + // Assert + Assert.AreEqual(52428800L, totalSize); + } + + [TestMethod] + public void ExtractTotalSizeFromContentRange_SmallFile_ReturnsTotalSize() + { + // Arrange + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + var contentRange = "bytes 0-999/1000"; + + // Act + var totalSize = coordinator.ExtractTotalSizeFromContentRange(contentRange); + + // Assert + Assert.AreEqual(1000L, totalSize); + } + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public void ExtractTotalSizeFromContentRange_InvalidFormat_ThrowsInvalidOperationException() + { + // Arrange + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + // Act + coordinator.ExtractTotalSizeFromContentRange("invalid-format"); + } + + [TestMethod] + public void CalculatePartRange_FirstPart_ReturnsCorrectRange() + { + // Arrange + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(partSize: 8 * 1024 * 1024); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + var objectSize = 50 * 1024 * 1024; // 50MB + + // Act + var (startByte, endByte) = coordinator.CalculatePartRange(1, objectSize); + + // Assert + Assert.AreEqual(0L, startByte); + Assert.AreEqual(8 * 1024 * 1024 - 1, endByte); + } + + [TestMethod] + public void CalculatePartRange_MiddlePart_ReturnsCorrectRange() + { + // Arrange + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(partSize: 8 * 1024 * 1024); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + var objectSize = 50 * 1024 * 1024; // 50MB + + // Act + var (startByte, endByte) = coordinator.CalculatePartRange(3, objectSize); + + // Assert + Assert.AreEqual(2 * 8 * 1024 * 1024, startByte); // Part 3 starts at 16MB + Assert.AreEqual(3 * 8 * 1024 * 1024 - 1, endByte); // Part 3 ends at 24MB - 1 + } + + [TestMethod] + public void CalculatePartRange_LastPartFullSize_ReturnsCorrectRange() + { + // Arrange + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(partSize: 8 * 1024 * 1024); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + var objectSize = 48 * 1024 * 1024; // 48MB (exactly 6 parts) + + 
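+ // With 8MB parts, part N covers bytes [(N - 1) * 8MB, N * 8MB - 1]: part 6 of a 48MB object starts at 5 * 8MB = 41,943,040 and ends at 48MB - 1 = 50,331,647, matching the asserts below.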
// Act + var (startByte, endByte) = coordinator.CalculatePartRange(6, objectSize); + + // Assert + Assert.AreEqual(5 * 8 * 1024 * 1024, startByte); // Part 6 starts at 40MB + Assert.AreEqual(48 * 1024 * 1024 - 1, endByte); // Part 6 ends at object end + } + + [TestMethod] + public void CalculatePartRange_LastPartPartialSize_ReturnsCorrectRange() + { + // Arrange + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(partSize: 8 * 1024 * 1024); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + var objectSize = 52428800; // 50MB (7 parts with last part partial) + + // Act + var (startByte, endByte) = coordinator.CalculatePartRange(7, objectSize); + + // Assert + Assert.AreEqual(6 * 8 * 1024 * 1024, startByte); // Part 7 starts at 48MB + Assert.AreEqual(52428800 - 1, endByte); // Part 7 ends at object end (partial part) + } + + [TestMethod] + public void CalculatePartRange_SmallObject_SinglePart_ReturnsCorrectRange() + { + // Arrange + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(partSize: 8 * 1024 * 1024); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + var objectSize = 1024; // 1KB + + // Act + var (startByte, endByte) = coordinator.CalculatePartRange(1, objectSize); + + // Assert + Assert.AreEqual(0L, startByte); + Assert.AreEqual(1023L, endByte); // 1KB - 1 + } + + [TestMethod] + public void ValidateContentRange_RangeStrategy_ValidRange_DoesNotThrow() + { + // Arrange + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + partSize: 8 * 1024 * 1024, + downloadType: MultipartDownloadType.RANGE); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + var response = new GetObjectResponse + { + ContentRange = "bytes 0-8388607/52428800" + }; + var objectSize = 52428800L; + + // Act - should not throw + coordinator.ValidateContentRange(response, 1, objectSize); + } + + [TestMethod] + public void ValidateContentRange_RangeStrategy_MiddlePart_ValidRange_DoesNotThrow() + { + // Arrange + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + partSize: 8 * 1024 * 1024, + downloadType: MultipartDownloadType.RANGE); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + var response = new GetObjectResponse + { + ContentRange = "bytes 16777216-25165823/52428800" + }; + var objectSize = 52428800L; + + // Act - should not throw + coordinator.ValidateContentRange(response, 3, objectSize); + } + + [TestMethod] + public void ValidateContentRange_PartStrategy_DoesNotValidate() + { + // Arrange - PART strategy should skip validation + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + 
downloadType: MultipartDownloadType.PART); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + var response = new GetObjectResponse + { + ContentRange = "bytes 0-8388607/52428800" // Valid range + }; + var objectSize = 52428800L; + + // Act - should not throw and should not validate + coordinator.ValidateContentRange(response, 1, objectSize); + } + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public void ValidateContentRange_RangeStrategy_MissingContentRange_ThrowsInvalidOperationException() + { + // Arrange + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + partSize: 8 * 1024 * 1024, + downloadType: MultipartDownloadType.RANGE); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + var response = new GetObjectResponse + { + ContentRange = null + }; + var objectSize = 52428800L; + + // Act + coordinator.ValidateContentRange(response, 1, objectSize); + } + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public void ValidateContentRange_RangeStrategy_EmptyContentRange_ThrowsInvalidOperationException() + { + // Arrange + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + partSize: 8 * 1024 * 1024, + downloadType: MultipartDownloadType.RANGE); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + var response = new GetObjectResponse + { + ContentRange = string.Empty + }; + var objectSize = 52428800L; + + // Act + coordinator.ValidateContentRange(response, 1, objectSize); + } + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public void ValidateContentRange_RangeStrategy_WrongStartByte_ThrowsInvalidOperationException() + { + // Arrange + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + partSize: 8 * 1024 * 1024, + downloadType: MultipartDownloadType.RANGE); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + // Expected: bytes 0-8388607, Actual: bytes 100-8388607 (wrong start) + var response = new GetObjectResponse + { + ContentRange = "bytes 100-8388607/52428800" + }; + var objectSize = 52428800L; + + // Act + coordinator.ValidateContentRange(response, 1, objectSize); + } + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public void ValidateContentRange_RangeStrategy_WrongEndByte_ThrowsInvalidOperationException() + { + // Arrange + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + partSize: 8 * 1024 * 1024, + downloadType: MultipartDownloadType.RANGE); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, 
CreateMockDataHandler().Object); + + // Expected: bytes 0-8388607, Actual: bytes 0-8388600 (wrong end) + var response = new GetObjectResponse + { + ContentRange = "bytes 0-8388600/52428800" + }; + var objectSize = 52428800L; + + // Act + coordinator.ValidateContentRange(response, 1, objectSize); + } + + [TestMethod] + public void ValidateContentRange_RangeStrategy_ExceptionMessage_ContainsExpectedAndActualRanges() + { + // Arrange + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + partSize: 8 * 1024 * 1024, + downloadType: MultipartDownloadType.RANGE); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + var response = new GetObjectResponse + { + ContentRange = "bytes 100-8388607/52428800" + }; + var objectSize = 52428800L; + + // Act & Assert + try + { + coordinator.ValidateContentRange(response, 1, objectSize); + Assert.Fail("Expected InvalidOperationException was not thrown"); + } + catch (InvalidOperationException ex) + { + Assert.IsTrue(ex.Message.Contains("ContentRange mismatch")); + Assert.IsTrue(ex.Message.Contains("Expected: bytes 0-8388607")); + Assert.IsTrue(ex.Message.Contains("Actual: bytes 100-8388607")); + } + } + + #endregion + } +} diff --git a/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadTestHelpers.cs b/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadTestHelpers.cs new file mode 100644 index 000000000000..49665433244b --- /dev/null +++ b/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadTestHelpers.cs @@ -0,0 +1,596 @@ +using Amazon.S3; +using Amazon.S3.Model; +using Amazon.S3.Transfer; +using Amazon.S3.Transfer.Internal; +using Moq; +using System; +using System.IO; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; + +namespace AWSSDK.UnitTests +{ + /// + /// Shared test utilities and helper methods for multipart download tests. + /// Provides mock object creation, test data generation, and common test scenarios. + /// + public static class MultipartDownloadTestHelpers + { + #region Test Constants + + public const int DefaultPartSize = 8 * 1024 * 1024; // 8MB + public const int SmallPartSize = 5 * 1024 * 1024; // 5MB + public const int BufferSize = 8192; // 8KB + public const int DefaultConcurrentRequests = 10; + public const int DefaultMaxInMemoryParts = 5; + + #endregion + + #region GetObjectResponse Creation + + /// + /// Creates a GetObjectResponse with configurable properties for testing. + /// + public static GetObjectResponse CreateMockGetObjectResponse( + long contentLength, + int? partsCount = null, + string contentRange = null, + string eTag = "test-etag", + byte[] testData = null, + bool includeHeaders = true) + { + return CreateMockGetObjectResponseWithEncryption( + contentLength, + partsCount, + contentRange, + eTag, + testData, + includeHeaders, + ServerSideEncryptionMethod.AES256, + null); + } + + /// + /// Creates a GetObjectResponse with configurable properties including encryption settings. + /// + public static GetObjectResponse CreateMockGetObjectResponseWithEncryption( + long contentLength, + int? 
partsCount, + string contentRange, + string eTag, + byte[] testData, + bool includeHeaders, + ServerSideEncryptionMethod serverSideEncryptionMethod, + string serverSideEncryptionKeyManagementServiceKeyId) + { + var response = new GetObjectResponse(); + + // Set ContentLength + response.ContentLength = contentLength; + + // Set ETag + response.ETag = eTag; + + // PartsCount (for multipart uploads) + if (partsCount.HasValue) + { + response.PartsCount = partsCount.Value; + } + + // ContentRange (for range requests) + if (contentRange != null) + { + response.ContentRange = contentRange; + } + + // ResponseStream with test data + if (testData == null) + { + testData = GenerateTestData((int)contentLength, 0); + } + response.ResponseStream = new MemoryStream(testData); + + // Headers + if (includeHeaders) + { + response.Headers["x-amz-server-side-encryption"] = "AES256"; + } + + // Server-side encryption + response.ServerSideEncryptionMethod = serverSideEncryptionMethod; + + // KMS key ID (if provided) + if (!string.IsNullOrEmpty(serverSideEncryptionKeyManagementServiceKeyId)) + { + response.ServerSideEncryptionKeyManagementServiceKeyId = serverSideEncryptionKeyManagementServiceKeyId; + } + + return response; + } + + /// + /// Creates a GetObjectResponse for a single-part download scenario. + /// + public static GetObjectResponse CreateSinglePartResponse( + long objectSize, + string eTag = "single-part-etag") + { + return CreateMockGetObjectResponse( + contentLength: objectSize, + partsCount: null, // No PartsCount indicates single part + contentRange: null, + eTag: eTag); + } + + /// + /// Creates a GetObjectResponse for the first part of a multipart download (PART strategy). + /// + public static GetObjectResponse CreateMultipartFirstPartResponse( + long partSize, + int totalParts, + long totalObjectSize, + string eTag = "multipart-etag") + { + // ContentRange format: "bytes 0-{partSize-1}/{totalObjectSize}" + var contentRange = $"bytes 0-{partSize - 1}/{totalObjectSize}"; + + return CreateMockGetObjectResponse( + contentLength: partSize, + partsCount: totalParts, + contentRange: contentRange, + eTag: eTag); + } + + /// + /// Creates a GetObjectResponse for a range request (RANGE strategy). + /// + public static GetObjectResponse CreateRangeResponse( + long rangeStart, + long rangeEnd, + long totalObjectSize, + string eTag = "range-etag") + { + var rangeSize = rangeEnd - rangeStart + 1; + var contentRange = $"bytes {rangeStart}-{rangeEnd}/{totalObjectSize}"; + + return CreateMockGetObjectResponse( + contentLength: rangeSize, + partsCount: null, + contentRange: contentRange, + eTag: eTag); + } + + #endregion + + #region Mock S3 Client Creation + + /// + /// Creates a mock S3 client with configurable GetObjectAsync behavior. + /// + public static Mock CreateMockS3Client( + Func> getObjectBehavior = null) + { + var mockClient = new Mock(); + + if (getObjectBehavior != null) + { + mockClient + .Setup(x => x.GetObjectAsync(It.IsAny(), It.IsAny())) + .Returns(getObjectBehavior); + } + + // Setup Config property - BufferSize is not virtual, so set directly + var mockConfig = new Mock(); + mockConfig.Object.BufferSize = BufferSize; + mockClient.Setup(x => x.Config).Returns(mockConfig.Object); + + return mockClient; + } + + /// + /// Creates a mock S3 client that returns responses for multiple parts in sequence. 
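+ /// The first response corresponds to part 1; with usePartStrategy it carries PartsCount (PART strategy), otherwise each response uses a ContentRange byte range (RANGE strategy).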
+ /// + public static Mock CreateMockS3ClientForMultipart( + int totalParts, + long partSize, + long totalObjectSize, + string eTag = "multipart-etag", + bool usePartStrategy = true) + { + var partResponses = new GetObjectResponse[totalParts]; + + for (int i = 0; i < totalParts; i++) + { + int partNumber = i + 1; + long actualPartSize = (partNumber == totalParts) + ? totalObjectSize - (partSize * (totalParts - 1)) // Last part may be smaller + : partSize; + + GetObjectResponse response; + + if (usePartStrategy) + { + // PART strategy: First part has PartsCount + if (partNumber == 1) + { + response = CreateMultipartFirstPartResponse(actualPartSize, totalParts, totalObjectSize, eTag); + } + else + { + var contentRange = $"bytes {(partNumber - 1) * partSize}-{(partNumber - 1) * partSize + actualPartSize - 1}/{totalObjectSize}"; + response = CreateMockGetObjectResponse(actualPartSize, totalParts, contentRange, eTag); + } + } + else + { + // RANGE strategy: Use byte ranges + long rangeStart = (partNumber - 1) * partSize; + long rangeEnd = rangeStart + actualPartSize - 1; + response = CreateRangeResponse(rangeStart, rangeEnd, totalObjectSize, eTag); + } + + partResponses[i] = response; + } + + var callCount = 0; + return CreateMockS3Client((request, ct) => + { + var responseIndex = Interlocked.Increment(ref callCount) - 1; + if (responseIndex >= partResponses.Length) + throw new InvalidOperationException($"Unexpected GetObjectAsync call #{responseIndex + 1}"); + + return Task.FromResult(partResponses[responseIndex]); + }); + } + + #endregion + + #region Test Data Generation + + /// + /// Generates predictable test data with a repeating pattern for verification. + /// + public static byte[] GenerateTestData(int size, int seed = 0) + { + return Enumerable.Range(seed, size).Select(i => (byte)(i % 256)).ToArray(); + } + + /// + /// Generates test data with a part-specific pattern (all bytes set to part number). + /// + public static byte[] GeneratePartSpecificData(int size, int partNumber) + { + return Enumerable.Repeat((byte)(partNumber % 256), size).ToArray(); + } + + /// + /// Generates mixed pattern data for boundary testing. + /// + public static byte[] CreateMixedPattern(int size, int seed) + { + var random = new Random(seed); + var data = new byte[size]; + + // Create a pattern with different regions + for (int i = 0; i < size; i++) + { + if (i < size / 3) + data[i] = (byte)(i % 256); // Sequential + else if (i < 2 * size / 3) + data[i] = (byte)random.Next(256); // Random + else + data[i] = (byte)((size - i) % 256); // Reverse sequential + } + + return data; + } + + /// + /// Verifies that two byte arrays are identical. + /// + public static bool VerifyDataMatch(byte[] expected, byte[] actual, int offset, int count) + { + if (actual == null || expected == null) + return false; + + if (offset + count > actual.Length || count > expected.Length) + return false; + + for (int i = 0; i < count; i++) + { + if (actual[offset + i] != expected[i]) + return false; + } + + return true; + } + + #endregion + + #region BufferedDownloadConfiguration Creation + + /// + /// Creates a default BufferedDownloadConfiguration for testing. 
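+ /// Defaults to 10 concurrent requests, 5 in-memory parts, an 8KB buffer, and an 8MB part size.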
+ /// + internal static BufferedDownloadConfiguration CreateBufferedDownloadConfiguration( + int concurrentRequests = DefaultConcurrentRequests, + int maxInMemoryParts = DefaultMaxInMemoryParts, + int bufferSize = BufferSize, + long partSize = DefaultPartSize) + { + return new BufferedDownloadConfiguration( + concurrentRequests, + maxInMemoryParts, + bufferSize, + partSize); + } + + /// + /// Creates a BufferedDownloadConfiguration with minimal settings for testing. + /// + internal static BufferedDownloadConfiguration CreateMinimalBufferedDownloadConfiguration() + { + return new BufferedDownloadConfiguration(1, 1, 1024, 8 * 1024 * 1024); + } + + #endregion + + #region Mock Request Creation + + /// + /// Creates a mock TransferUtilityOpenStreamRequest for testing. + /// + public static TransferUtilityOpenStreamRequest CreateOpenStreamRequest( + string bucketName = "test-bucket", + string key = "test-key", + long? partSize = null, + MultipartDownloadType downloadType = MultipartDownloadType.PART) + { + var request = new TransferUtilityOpenStreamRequest + { + BucketName = bucketName, + Key = key, + MultipartDownloadType = downloadType + }; + + if (partSize.HasValue) + { + request.PartSize = partSize.Value; + } + + return request; + } + + #endregion + + #region Advanced Mock Creation Helpers + + /// + /// Creates a mock S3 client that returns responses sequentially. + /// + public static Mock CreateSequentialMockClient(params GetObjectResponse[] responses) + { + var callCount = 0; + return CreateMockS3Client((request, ct) => + { + var responseIndex = Interlocked.Increment(ref callCount) - 1; + if (responseIndex >= responses.Length) + throw new InvalidOperationException($"Unexpected GetObjectAsync call #{responseIndex + 1}"); + return Task.FromResult(responses[responseIndex]); + }); + } + + /// + /// Creates a mock S3 client that captures the cancellation token used. + /// + public static Mock CreateMockS3ClientWithTokenCapture(Action tokenCapture) + { + var mockClient = new Mock(); + mockClient.Setup(x => x.GetObjectAsync(It.IsAny(), It.IsAny())) + .Callback((req, ct) => tokenCapture(ct)) + .ReturnsAsync(CreateSinglePartResponse(1024)); + + var mockConfig = new Mock(); + mockConfig.Object.BufferSize = BufferSize; + mockClient.Setup(x => x.Config).Returns(mockConfig.Object); + + return mockClient; + } + + /// + /// Creates a mock S3 client that throws OperationCanceledException. + /// + public static Mock CreateMockS3ClientWithCancellation() + { + var mockClient = new Mock(); + mockClient.Setup(x => x.GetObjectAsync(It.IsAny(), It.IsAny())) + .ThrowsAsync(new OperationCanceledException()); + + var mockConfig = new Mock(); + mockConfig.Object.BufferSize = BufferSize; + mockClient.Setup(x => x.Config).Returns(mockConfig.Object); + + return mockClient; + } + + #endregion + + #region Test Setup Helpers + + /// + /// Configuration for validation tests. + /// + internal class ValidationTestConfig + { + public long PartSize { get; set; } + public long TotalSize { get; set; } + public int TotalParts { get; set; } + public string ETag { get; set; } + } + + /// + /// Types of validation failures that can occur during multipart downloads. + /// + public enum ValidationFailureType + { + MissingContentRange, + InvalidContentRangeFormat, + UnparseableRange, + RangeMismatch, + ETagMismatch + } + + /// + /// Creates a mock S3 client configured to produce a specific validation failure. 
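+ /// The first response returned is always valid; the second carries the induced failure (ETag mismatch uses the PART strategy, all other failure types use RANGE).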
+ /// + internal static Mock CreateMockClientWithValidationFailure(ValidationFailureType failureType) + { + var config = new ValidationTestConfig + { + PartSize = 8 * 1024 * 1024, + TotalSize = 20 * 1024 * 1024, + TotalParts = 3, + ETag = "test-etag" + }; + + GetObjectResponse firstPartResponse; + GetObjectResponse secondPartResponse; + + if (failureType == ValidationFailureType.ETagMismatch) + { + // PART strategy for ETag testing + firstPartResponse = CreateMultipartFirstPartResponse( + config.PartSize, config.TotalParts, config.TotalSize, config.ETag); + secondPartResponse = CreateMockGetObjectResponse( + config.PartSize, config.TotalParts, + $"bytes {config.PartSize}-{2 * config.PartSize - 1}/{config.TotalSize}", + "different-etag"); + } + else + { + // RANGE strategy for ContentRange validation testing + firstPartResponse = CreateRangeResponse(0, config.PartSize - 1, config.TotalSize, config.ETag); + secondPartResponse = CreateInvalidResponse(failureType, config); + } + + return CreateSequentialMockClient(firstPartResponse, secondPartResponse); + } + + /// + /// Creates an invalid GetObjectResponse based on the failure type. + /// + private static GetObjectResponse CreateInvalidResponse(ValidationFailureType failureType, ValidationTestConfig config) + { + return failureType switch + { + ValidationFailureType.MissingContentRange => CreateMockGetObjectResponse( + config.PartSize, null, null, config.ETag), + ValidationFailureType.InvalidContentRangeFormat => CreateMockGetObjectResponse( + config.PartSize, null, "invalid-format-no-slash", config.ETag), + ValidationFailureType.UnparseableRange => CreateMockGetObjectResponse( + config.PartSize, null, "bytes abc-xyz/20971520", config.ETag), + ValidationFailureType.RangeMismatch => CreateMockGetObjectResponse( + config.PartSize, null, $"bytes 0-{config.PartSize - 1}/{config.TotalSize}", config.ETag), + _ => throw new ArgumentException($"Unknown failure type: {failureType}") + }; + } + + /// + /// Creates a coordinator configured for validation testing. + /// + internal static MultipartDownloadManager CreateCoordinatorForValidationTest( + IAmazonS3 client, ValidationFailureType failureType) + { + var downloadType = failureType == ValidationFailureType.ETagMismatch + ? MultipartDownloadType.PART + : MultipartDownloadType.RANGE; + + var request = CreateOpenStreamRequest( + partSize: failureType == ValidationFailureType.ETagMismatch ? null : 8 * 1024 * 1024, + downloadType: downloadType); + + var config = CreateBufferedDownloadConfiguration(concurrentRequests: 1); + + var mockDataHandler = new Mock(); + mockDataHandler.Setup(x => x.ProcessPartAsync(It.IsAny(), It.IsAny(), It.IsAny())) + .Returns(Task.CompletedTask); + mockDataHandler.Setup(x => x.WaitForCapacityAsync(It.IsAny())) + .Returns(Task.CompletedTask); + + return new MultipartDownloadManager(client, request, config, mockDataHandler.Object); + } + + /// + /// Creates a response appropriate for discovery based on download type and parameters. + /// + internal static GetObjectResponse CreateDiscoveryResponse( + MultipartDownloadType downloadType, long objectSize, int? 
partsCount) + { + if (downloadType == MultipartDownloadType.PART) + { + if (partsCount == null || partsCount == 1) + { + return CreateSinglePartResponse(objectSize, "single-part-etag"); + } + else + { + long partSize = objectSize / partsCount.Value; + return CreateMultipartFirstPartResponse(partSize, partsCount.Value, objectSize, "multipart-etag"); + } + } + else // RANGE + { + if (objectSize <= DefaultPartSize) + { + return CreateMockGetObjectResponse(objectSize, null, null, "small-object-etag"); + } + else + { + return CreateRangeResponse(0, DefaultPartSize - 1, objectSize, "range-etag"); + } + } + } + + /// + /// Creates a complete test setup for discovery testing. + /// + internal static (Mock, MultipartDownloadManager) CreateDiscoveryTestSetup( + MultipartDownloadType downloadType, long objectSize, int? partsCount, long? partSize = null) + { + var mockResponse = CreateDiscoveryResponse(downloadType, objectSize, partsCount); + var mockClient = CreateMockS3Client((req, ct) => Task.FromResult(mockResponse)); + var request = CreateOpenStreamRequest( + partSize: partSize ?? (downloadType == MultipartDownloadType.RANGE ? DefaultPartSize : (long?)null), + downloadType: downloadType); + var config = CreateBufferedDownloadConfiguration(); + + var mockDataHandler = new Mock(); + mockDataHandler.Setup(x => x.ProcessPartAsync(It.IsAny(), It.IsAny(), It.IsAny())) + .Returns(Task.CompletedTask); + mockDataHandler.Setup(x => x.WaitForCapacityAsync(It.IsAny())) + .Returns(Task.CompletedTask); + + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object); + + return (mockClient, coordinator); + } + + /// + /// Creates a basic mock data handler for testing. + /// + internal static Mock CreateMockDataHandler() + { + var mockHandler = new Mock(); + mockHandler.Setup(x => x.ProcessPartAsync(It.IsAny(), It.IsAny(), It.IsAny())) + .Returns(Task.CompletedTask); + mockHandler.Setup(x => x.WaitForCapacityAsync(It.IsAny())) + .Returns(Task.CompletedTask); + mockHandler.Setup(x => x.ReleaseCapacity()); + mockHandler.Setup(x => x.OnDownloadComplete(It.IsAny())); + return mockHandler; + } + + #endregion + } +} diff --git a/sdk/test/Services/S3/UnitTests/Custom/OpenStreamWithResponseCommandTests.cs b/sdk/test/Services/S3/UnitTests/Custom/OpenStreamWithResponseCommandTests.cs new file mode 100644 index 000000000000..907c5b52f8d6 --- /dev/null +++ b/sdk/test/Services/S3/UnitTests/Custom/OpenStreamWithResponseCommandTests.cs @@ -0,0 +1,359 @@ +using Amazon.S3; +using Amazon.S3.Model; +using Amazon.S3.Transfer; +using Amazon.S3.Transfer.Internal; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Moq; +using System; +using System.IO; +using System.Threading; +using System.Threading.Tasks; + +namespace AWSSDK.UnitTests +{ + [TestClass] + public class OpenStreamWithResponseCommandTests + { + #region ExecuteAsync Tests - Success + + [TestMethod] + public async Task ExecuteAsync_CreatesBufferedMultipartStream() + { + // Arrange + var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse(1024, "test-etag"); + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = new TransferUtilityConfig(); + var command = new OpenStreamWithResponseCommand(mockClient.Object, request, config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + 
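+ // The stream handed back should be the BufferedMultipartStream wrapper, not the raw S3 response stream.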
Assert.IsNotNull(response.ResponseStream); + Assert.IsInstanceOfType(response.ResponseStream, typeof(BufferedMultipartStream)); + } + + [TestMethod] + public async Task ExecuteAsync_CallsInitializeAsync() + { + // Arrange + var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse(1024, "test-etag"); + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = new TransferUtilityConfig(); + var command = new OpenStreamWithResponseCommand(mockClient.Object, request, config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + var stream = (BufferedMultipartStream)response.ResponseStream; + Assert.IsNotNull(stream.DiscoveryResult); // Indicates initialization occurred + } + + [TestMethod] + public async Task ExecuteAsync_ReturnsResponse() + { + // Arrange + var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse(1024, "test-etag"); + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = new TransferUtilityConfig(); + var command = new OpenStreamWithResponseCommand(mockClient.Object, request, config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.IsInstanceOfType(response, typeof(TransferUtilityOpenStreamResponse)); + } + + [TestMethod] + public async Task ExecuteAsync_SetsResponseStream() + { + // Arrange + var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse(1024, "test-etag"); + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = new TransferUtilityConfig(); + var command = new OpenStreamWithResponseCommand(mockClient.Object, request, config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response.ResponseStream); + Assert.IsTrue(response.ResponseStream.CanRead); + } + + #endregion + + #region ExecuteAsync Tests - Response Mapping + + [TestMethod] + public async Task ExecuteAsync_MapsMetadataFromInitialResponse() + { + // Arrange + var testData = MultipartDownloadTestHelpers.GenerateTestData(1024, 0); + var mockResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponseWithEncryption( + contentLength: 1024, + partsCount: null, + contentRange: null, + eTag: "test-etag-123", + testData: testData, + includeHeaders: true, + serverSideEncryptionMethod: ServerSideEncryptionMethod.AES256, + serverSideEncryptionKeyManagementServiceKeyId: "test-kms-key"); + + // Add custom headers + mockResponse.Headers["Content-Language"] = "en-US"; + mockResponse.Headers["Cache-Control"] = "max-age=3600"; + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = new TransferUtilityConfig(); + var command = new OpenStreamWithResponseCommand(mockClient.Object, request, config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual("test-etag-123", response.ETag); + 
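+ // Headers and server-side encryption settings should be carried over from the initial discovery response.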
Assert.IsNotNull(response.Headers); + Assert.AreEqual(ServerSideEncryptionMethod.AES256, response.ServerSideEncryptionMethod); + Assert.AreEqual("test-kms-key", response.ServerSideEncryptionKeyManagementServiceKeyId); + } + + [TestMethod] + public async Task ExecuteAsync_SinglePart_MapsFromSinglePartResponse() + { + // Arrange + var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse( + objectSize: 2048, + eTag: "single-part-etag"); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = new TransferUtilityConfig(); + var command = new OpenStreamWithResponseCommand(mockClient.Object, request, config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual("single-part-etag", response.ETag); + } + + [TestMethod] + public async Task ExecuteAsync_Multipart_MapsFromInitialResponse() + { + // Arrange + var totalObjectSize = 50 * 1024 * 1024; + var partSize = 10 * 1024 * 1024; + var totalParts = 5; + + // Use CreateMockS3ClientForMultipart to properly mock all parts + var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( + totalParts, partSize, totalObjectSize, "multipart-etag", usePartStrategy: true); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = new TransferUtilityConfig { ConcurrentServiceRequests = 1 }; + var command = new OpenStreamWithResponseCommand(mockClient.Object, request, config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual("multipart-etag", response.ETag); + + // Cleanup + response.ResponseStream.Dispose(); + } + + #endregion + + #region ContentLength and ContentRange Validation Tests + + [TestMethod] + public async Task ExecuteAsync_SinglePart_SetsCorrectContentLengthAndRange() + { + // Arrange + var objectSize = 2048; + var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse( + objectSize: objectSize, + eTag: "single-part-etag"); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = new TransferUtilityConfig(); + var command = new OpenStreamWithResponseCommand(mockClient.Object, request, config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert - SEP Part GET Step 7 / Ranged GET Step 9 + Assert.AreEqual(objectSize, response.Headers.ContentLength, + "ContentLength should equal total object size"); + Assert.AreEqual($"bytes 0-{objectSize - 1}/{objectSize}", response.ContentRange, + "ContentRange should be bytes 0-(ContentLength-1)/ContentLength"); + + // Cleanup + response.ResponseStream.Dispose(); + } + + [TestMethod] + public async Task ExecuteAsync_MultipartPartStrategy_SetsCorrectContentLengthAndRange() + { + // Arrange + var totalParts = 5; + var partSize = 10 * 1024 * 1024; // 10MB per part + var totalObjectSize = (long)totalParts * partSize; // 50MB total + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( + totalParts, partSize, totalObjectSize, "multipart-etag", usePartStrategy: true); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: MultipartDownloadType.PART); + var config = new 
TransferUtilityConfig { ConcurrentServiceRequests = 1 }; + var command = new OpenStreamWithResponseCommand(mockClient.Object, request, config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert - SEP Part GET Step 7 + Assert.AreEqual(totalObjectSize, response.Headers.ContentLength, + "ContentLength should equal total object size, not first part size"); + Assert.AreEqual($"bytes 0-{totalObjectSize - 1}/{totalObjectSize}", response.ContentRange, + "ContentRange should be bytes 0-(ContentLength-1)/ContentLength for entire object"); + + // Cleanup + response.ResponseStream.Dispose(); + } + + [TestMethod] + public async Task ExecuteAsync_MultipartRangeStrategy_SetsCorrectContentLengthAndRange() + { + // Arrange + var totalObjectSize = 25 * 1024 * 1024; // 25MB total + var partSize = 8 * 1024 * 1024; // 8MB per part + var totalParts = (int)Math.Ceiling((double)totalObjectSize / partSize); // 4 parts + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( + totalParts, partSize, totalObjectSize, "range-multipart-etag", usePartStrategy: false); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + partSize: partSize, + downloadType: MultipartDownloadType.RANGE); + var config = new TransferUtilityConfig { ConcurrentServiceRequests = 1 }; + var command = new OpenStreamWithResponseCommand(mockClient.Object, request, config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert - SEP Ranged GET Step 9 + Assert.AreEqual(totalObjectSize, response.Headers.ContentLength, + "ContentLength should equal total object size, not first range size"); + Assert.AreEqual($"bytes 0-{totalObjectSize - 1}/{totalObjectSize}", response.ContentRange, + "ContentRange should be bytes 0-(ContentLength-1)/ContentLength for entire object"); + + // Cleanup + response.ResponseStream.Dispose(); + } + + #endregion + + #region Integration Tests + + [TestMethod] + public async Task ExecuteAsync_EndToEnd_SinglePart() + { + // Arrange + var testData = MultipartDownloadTestHelpers.GenerateTestData(512, 0); + var mockResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse( + contentLength: 512, + partsCount: null, + contentRange: null, + eTag: "single-etag", + testData: testData); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = new TransferUtilityConfig(); + var command = new OpenStreamWithResponseCommand(mockClient.Object, request, config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.IsNotNull(response.ResponseStream); + + // Verify we can read from the stream + var buffer = new byte[256]; + var bytesRead = await response.ResponseStream.ReadAsync(buffer, 0, buffer.Length); + Assert.AreEqual(256, bytesRead); + + // Verify data matches + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch(testData, buffer, 0, 256)); + + // Cleanup + response.ResponseStream.Dispose(); + } + + [TestMethod] + public async Task ExecuteAsync_EndToEnd_Multipart() + { + // Arrange + var totalParts = 3; + var partSize = 8 * 1024 * 1024; + var totalObjectSize = totalParts * partSize; + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( + totalParts, partSize, totalObjectSize, "multi-etag", usePartStrategy: true); + + var request = 
MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = new TransferUtilityConfig { ConcurrentServiceRequests = 1 }; + var command = new OpenStreamWithResponseCommand(mockClient.Object, request, config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.IsNotNull(response.ResponseStream); + Assert.IsInstanceOfType(response.ResponseStream, typeof(BufferedMultipartStream)); + + var stream = (BufferedMultipartStream)response.ResponseStream; + Assert.AreEqual(totalParts, stream.DiscoveryResult.TotalParts); + Assert.AreEqual(totalObjectSize, stream.DiscoveryResult.ObjectSize); + + // Cleanup + response.ResponseStream.Dispose(); + } + + #endregion + } +} diff --git a/sdk/test/Services/S3/UnitTests/Custom/PartBufferManagerTests.cs b/sdk/test/Services/S3/UnitTests/Custom/PartBufferManagerTests.cs new file mode 100644 index 000000000000..72e3a11158c4 --- /dev/null +++ b/sdk/test/Services/S3/UnitTests/Custom/PartBufferManagerTests.cs @@ -0,0 +1,1007 @@ +using Amazon.S3.Transfer.Internal; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Moq; +using System; +using System.Buffers; +using System.Threading; +using System.Threading.Tasks; + +namespace AWSSDK.UnitTests +{ + /// + /// Unit tests for PartBufferManager class. + /// Tests buffer management, sequential access, and cross-part boundary reading. + /// + [TestClass] + public class PartBufferManagerTests + { + #region Constructor Tests + + [TestMethod] + public void Constructor_WithValidConfiguration_CreatesManager() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + + // Act + var manager = new PartBufferManager(config); + + // Assert + Assert.IsNotNull(manager); + Assert.AreEqual(1, manager.NextExpectedPartNumber); + + // Cleanup + manager.Dispose(); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentNullException))] + public void Constructor_WithNullConfiguration_ThrowsArgumentNullException() + { + // Act + var manager = new PartBufferManager(null); + + // Assert - ExpectedException + } + + #endregion + + #region Property Tests + + [TestMethod] + public void NextExpectedPartNumber_StartsAtOne() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + try + { + // Act & Assert + Assert.AreEqual(1, manager.NextExpectedPartNumber); + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + public async Task NextExpectedPartNumber_IncrementsAfterPartComplete() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + try + { + // Add part 1 + byte[] testBuffer = ArrayPool.Shared.Rent(512); + var partBuffer = new StreamPartBuffer(1, testBuffer, 512); + await manager.AddBufferAsync(partBuffer, CancellationToken.None); + + // Read part 1 completely + byte[] readBuffer = new byte[512]; + await manager.ReadAsync(readBuffer, 0, 512, CancellationToken.None); + + // Act & Assert - Should advance to part 2 + Assert.AreEqual(2, manager.NextExpectedPartNumber); + } + finally + { + manager.Dispose(); + } + } + + #endregion + + #region WaitForBufferSpaceAsync Tests + + [TestMethod] + public async Task WaitForBufferSpaceAsync_InitialState_AllowsImmediateAccess() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(maxInMemoryParts: 5); + var manager = new 
PartBufferManager(config); + + try + { + // Act - Should complete immediately + var task = manager.WaitForBufferSpaceAsync(CancellationToken.None); + + // Assert + Assert.IsTrue(task.IsCompleted); + await task; // Should not throw + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + public async Task WaitForBufferSpaceAsync_WhenMaxPartsReached_Blocks() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(maxInMemoryParts: 2); + var manager = new PartBufferManager(config); + + try + { + // Fill up to max parts + for (int i = 1; i <= 2; i++) + { + await manager.WaitForBufferSpaceAsync(CancellationToken.None); + byte[] testBuffer = ArrayPool.Shared.Rent(512); + var partBuffer = new StreamPartBuffer(i, testBuffer, 512); + await manager.AddBufferAsync(partBuffer, CancellationToken.None); + } + + // Act - Try to wait for space (should block) + var waitTask = manager.WaitForBufferSpaceAsync(CancellationToken.None); + + // Give a small delay to ensure it would block + await Task.Delay(50); + + // Assert - Should not have completed + Assert.IsFalse(waitTask.IsCompleted); + + // Cleanup - release space to unblock + manager.ReleaseBufferSpace(); + await waitTask; + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + public async Task WaitForBufferSpaceAsync_AfterRelease_AllowsAccess() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(maxInMemoryParts: 1); + var manager = new PartBufferManager(config); + + try + { + // Take the one available slot + await manager.WaitForBufferSpaceAsync(CancellationToken.None); + byte[] testBuffer = ArrayPool.Shared.Rent(512); + var partBuffer = new StreamPartBuffer(1, testBuffer, 512); + await manager.AddBufferAsync(partBuffer, CancellationToken.None); + + // Release space + manager.ReleaseBufferSpace(); + + // Act - Should be able to wait again + var waitTask = manager.WaitForBufferSpaceAsync(CancellationToken.None); + + // Assert + Assert.IsTrue(waitTask.IsCompleted); + await waitTask; + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + public async Task WaitForBufferSpaceAsync_WithCancellation_ThrowsOperationCanceledException() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(maxInMemoryParts: 1); + var manager = new PartBufferManager(config); + var cts = new CancellationTokenSource(); + + try + { + // Take the one available slot + await manager.WaitForBufferSpaceAsync(CancellationToken.None); + + // Cancel immediately + cts.Cancel(); + + // Act & Assert + // Use try-catch to accept both OperationCanceledException and TaskCanceledException + // (TaskCanceledException derives from OperationCanceledException) + try + { + await manager.WaitForBufferSpaceAsync(cts.Token); + Assert.Fail("Expected OperationCanceledException was not thrown"); + } + catch (OperationCanceledException ex) + { + // Success - accepts both OperationCanceledException and derived types like TaskCanceledException + Assert.AreEqual(cts.Token, ex.CancellationToken, "CancellationToken should match the provided token"); + } + } + finally + { + manager.Dispose(); + cts.Dispose(); + } + } + + #endregion + + #region AddBufferAsync Tests + + [TestMethod] + public async Task AddBufferAsync_CreatesBufferedDataSource() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + try + { + byte[] testBuffer = ArrayPool.Shared.Rent(512); + var 
partBuffer = new StreamPartBuffer(1, testBuffer, 512); + + // Act + await manager.AddBufferAsync(partBuffer, CancellationToken.None); + + // Assert - Should be able to read from part 1 + byte[] readBuffer = new byte[512]; + int bytesRead = await manager.ReadAsync(readBuffer, 0, 512, CancellationToken.None); + Assert.AreEqual(512, bytesRead); + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + [ExpectedException(typeof(ArgumentNullException))] + public async Task AddBufferAsync_WithNullBuffer_ThrowsArgumentNullException() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + try + { + // Act + await manager.AddBufferAsync(null, CancellationToken.None); + + // Assert - ExpectedException + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + public async Task AddBufferAsync_SignalsPartAvailable() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + try + { + // Start reading before part is available + var readTask = Task.Run(async () => + { + byte[] readBuffer = new byte[512]; + return await manager.ReadAsync(readBuffer, 0, 512, CancellationToken.None); + }); + + // Give read task time to start waiting + await Task.Delay(50); + + // Add the part + byte[] testBuffer = ArrayPool.Shared.Rent(512); + var partBuffer = new StreamPartBuffer(1, testBuffer, 512); + await manager.AddBufferAsync(partBuffer, CancellationToken.None); + + // Assert - Read should complete + int bytesRead = await readTask; + Assert.AreEqual(512, bytesRead); + } + finally + { + manager.Dispose(); + } + } + + #endregion + + #region AddDataSource Tests + + [TestMethod] + public async Task AddDataSource_AddsToCollection() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + try + { + byte[] testBuffer = ArrayPool.Shared.Rent(512); + var partBuffer = new StreamPartBuffer(1, testBuffer, 512); + var dataSource = new BufferedDataSource(partBuffer); + + // Act + manager.AddDataSource(dataSource); + + // Assert - Should be able to read from part 1 + byte[] readBuffer = new byte[512]; + int bytesRead = await manager.ReadAsync(readBuffer, 0, 512, CancellationToken.None); + Assert.AreEqual(512, bytesRead); + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + [ExpectedException(typeof(ArgumentNullException))] + public void AddDataSource_WithNullDataSource_ThrowsArgumentNullException() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + try + { + // Act + manager.AddDataSource(null); + + // Assert - ExpectedException + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public void AddDataSource_WithDuplicatePartNumber_ThrowsInvalidOperationException() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + try + { + // Add part 1 + byte[] testBuffer1 = ArrayPool.Shared.Rent(512); + var partBuffer1 = new StreamPartBuffer(1, testBuffer1, 512); + var dataSource1 = new BufferedDataSource(partBuffer1); + manager.AddDataSource(dataSource1); + + // Try to add duplicate part 1 + byte[] testBuffer2 = ArrayPool.Shared.Rent(512); + var partBuffer2 = new 
StreamPartBuffer(1, testBuffer2, 512); + var dataSource2 = new BufferedDataSource(partBuffer2); + + // Act + manager.AddDataSource(dataSource2); + + // Assert - ExpectedException + } + finally + { + manager.Dispose(); + } + } + + #endregion + + #region ReadAsync Tests - Sequential Access + + [TestMethod] + public async Task ReadAsync_ReadsDataSequentially() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + try + { + byte[] testData = MultipartDownloadTestHelpers.GenerateTestData(512, 0); + byte[] testBuffer = ArrayPool.Shared.Rent(512); + Buffer.BlockCopy(testData, 0, testBuffer, 0, 512); + + var partBuffer = new StreamPartBuffer(1, testBuffer, 512); + await manager.AddBufferAsync(partBuffer, CancellationToken.None); + + // Act + byte[] readBuffer = new byte[512]; + int bytesRead = await manager.ReadAsync(readBuffer, 0, 512, CancellationToken.None); + + // Assert + Assert.AreEqual(512, bytesRead); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch(testData, readBuffer, 0, 512)); + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + public async Task ReadAsync_AdvancesNextExpectedPartNumber() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + try + { + // Add part 1 + byte[] testBuffer = ArrayPool.Shared.Rent(512); + var partBuffer = new StreamPartBuffer(1, testBuffer, 512); + await manager.AddBufferAsync(partBuffer, CancellationToken.None); + + // Read part 1 completely + byte[] readBuffer = new byte[512]; + await manager.ReadAsync(readBuffer, 0, 512, CancellationToken.None); + + // Assert + Assert.AreEqual(2, manager.NextExpectedPartNumber); + } + finally + { + manager.Dispose(); + } + } + + #endregion + + #region ReadAsync Tests - Parameter Validation + + [TestMethod] + [ExpectedException(typeof(ArgumentNullException))] + public async Task ReadAsync_WithNullBuffer_ThrowsArgumentNullException() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + try + { + // Act + await manager.ReadAsync(null, 0, 512, CancellationToken.None); + + // Assert - ExpectedException + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + [ExpectedException(typeof(ArgumentOutOfRangeException))] + public async Task ReadAsync_WithNegativeOffset_ThrowsArgumentOutOfRangeException() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + byte[] readBuffer = new byte[512]; + + try + { + // Act + await manager.ReadAsync(readBuffer, -1, 512, CancellationToken.None); + + // Assert - ExpectedException + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + [ExpectedException(typeof(ArgumentOutOfRangeException))] + public async Task ReadAsync_WithNegativeCount_ThrowsArgumentOutOfRangeException() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + byte[] readBuffer = new byte[512]; + + try + { + // Act + await manager.ReadAsync(readBuffer, 0, -1, CancellationToken.None); + + // Assert - ExpectedException + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + [ExpectedException(typeof(ArgumentException))] + public async Task ReadAsync_WithOffsetCountExceedingBounds_ThrowsArgumentException() + 
{ + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + byte[] readBuffer = new byte[512]; + + try + { + // Act + await manager.ReadAsync(readBuffer, 400, 200, CancellationToken.None); + + // Assert - ExpectedException + } + finally + { + manager.Dispose(); + } + } + + #endregion + + #region ReadAsync Tests - Waiting + + [TestMethod] + public async Task ReadAsync_WaitsForPartAvailability() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + try + { + // Start reading before part is available + var readTask = Task.Run(async () => + { + byte[] readBuffer = new byte[512]; + return await manager.ReadAsync(readBuffer, 0, 512, CancellationToken.None); + }); + + // Give read task time to start waiting + await Task.Delay(100); + Assert.IsFalse(readTask.IsCompleted); + + // Add the part asynchronously + byte[] testBuffer = ArrayPool.Shared.Rent(512); + var partBuffer = new StreamPartBuffer(1, testBuffer, 512); + await manager.AddBufferAsync(partBuffer, CancellationToken.None); + + // Assert - Read should complete + int bytesRead = await readTask; + Assert.AreEqual(512, bytesRead); + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + public async Task ReadAsync_WhenDownloadComplete_ReturnsZero() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + try + { + // Mark download as complete + manager.MarkDownloadComplete(null); + + // Act + byte[] readBuffer = new byte[512]; + int bytesRead = await manager.ReadAsync(readBuffer, 0, 512, CancellationToken.None); + + // Assert + Assert.AreEqual(0, bytesRead); + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public async Task ReadAsync_WhenDownloadFailed_ThrowsException() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + try + { + // Mark download as failed + var testException = new Exception("Download failed"); + manager.MarkDownloadComplete(testException); + + // Act + byte[] readBuffer = new byte[512]; + await manager.ReadAsync(readBuffer, 0, 512, CancellationToken.None); + + // Assert - ExpectedException + } + finally + { + manager.Dispose(); + } + } + + #endregion + + #region ReadAsync Tests - Cross-Part Boundary Reading + + [TestMethod] + public async Task ReadAsync_ReadingAcrossPartBoundary_FillsBuffer() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + try + { + // Add Part 1 (100 bytes) + byte[] testData1 = MultipartDownloadTestHelpers.GenerateTestData(100, 0); + byte[] testBuffer1 = ArrayPool.Shared.Rent(100); + Buffer.BlockCopy(testData1, 0, testBuffer1, 0, 100); + var partBuffer1 = new StreamPartBuffer(1, testBuffer1, 100); + await manager.AddBufferAsync(partBuffer1, CancellationToken.None); + + // Add Part 2 (100 bytes) + byte[] testData2 = MultipartDownloadTestHelpers.GenerateTestData(100, 100); + byte[] testBuffer2 = ArrayPool.Shared.Rent(100); + Buffer.BlockCopy(testData2, 0, testBuffer2, 0, 100); + var partBuffer2 = new StreamPartBuffer(2, testBuffer2, 100); + await manager.AddBufferAsync(partBuffer2, CancellationToken.None); + + // Act - Request 150 bytes (spans both parts) 
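+                // (A single ReadAsync call is expected to drain the rest of part 1 and then
+                // continue into part 2 in the same call, rather than stopping at the boundary.)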
+ byte[] readBuffer = new byte[150]; + int bytesRead = await manager.ReadAsync(readBuffer, 0, 150, CancellationToken.None); + + // Assert + Assert.AreEqual(150, bytesRead); + + // Verify first 100 bytes from part 1 + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch(testData1, readBuffer, 0, 100)); + + // Verify next 50 bytes from part 2 + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch(testData2, readBuffer, 100, 50)); + + // Should still be on part 2 (not complete yet, 50 bytes remaining) + Assert.AreEqual(2, manager.NextExpectedPartNumber); + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + public async Task ReadAsync_MultiplePartsInSingleRead_AdvancesCorrectly() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + try + { + // Add 3 small parts (50 bytes each) + for (int i = 1; i <= 3; i++) + { + byte[] testData = MultipartDownloadTestHelpers.GeneratePartSpecificData(50, i); + byte[] testBuffer = ArrayPool.Shared.Rent(50); + Buffer.BlockCopy(testData, 0, testBuffer, 0, 50); + var partBuffer = new StreamPartBuffer(i, testBuffer, 50); + await manager.AddBufferAsync(partBuffer, CancellationToken.None); + } + + // Act - Read 150 bytes (all 3 parts) + byte[] readBuffer = new byte[150]; + int bytesRead = await manager.ReadAsync(readBuffer, 0, 150, CancellationToken.None); + + // Assert + Assert.AreEqual(150, bytesRead); + Assert.AreEqual(4, manager.NextExpectedPartNumber); // Advanced to part 4 + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + public async Task ReadAsync_PartCompletes_AdvancesToNextPart() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + try + { + // Add part 1 + byte[] testBuffer1 = ArrayPool.Shared.Rent(100); + var partBuffer1 = new StreamPartBuffer(1, testBuffer1, 100); + await manager.AddBufferAsync(partBuffer1, CancellationToken.None); + + // Read part 1 completely + byte[] readBuffer = new byte[100]; + await manager.ReadAsync(readBuffer, 0, 100, CancellationToken.None); + + // Assert - Should advance to part 2 + Assert.AreEqual(2, manager.NextExpectedPartNumber); + + // Add part 2 + byte[] testBuffer2 = ArrayPool.Shared.Rent(100); + var partBuffer2 = new StreamPartBuffer(2, testBuffer2, 100); + await manager.AddBufferAsync(partBuffer2, CancellationToken.None); + + // Read part 2 + int bytesRead = await manager.ReadAsync(readBuffer, 0, 100, CancellationToken.None); + + // Assert + Assert.AreEqual(100, bytesRead); + Assert.AreEqual(3, manager.NextExpectedPartNumber); + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + public async Task ReadAsync_EmptyPart_ContinuesToNextPart() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + try + { + // Add empty part 1 + byte[] testBuffer1 = ArrayPool.Shared.Rent(100); + var partBuffer1 = new StreamPartBuffer(1, testBuffer1, 0); // 0 bytes + await manager.AddBufferAsync(partBuffer1, CancellationToken.None); + + // Add part 2 with data + byte[] testData2 = MultipartDownloadTestHelpers.GenerateTestData(100, 0); + byte[] testBuffer2 = ArrayPool.Shared.Rent(100); + Buffer.BlockCopy(testData2, 0, testBuffer2, 0, 100); + var partBuffer2 = new StreamPartBuffer(2, testBuffer2, 100); + await manager.AddBufferAsync(partBuffer2, CancellationToken.None); + + // Act - Try to read 100 bytes 
starting from part 1
+                byte[] readBuffer = new byte[100];
+                int bytesRead = await manager.ReadAsync(readBuffer, 0, 100, CancellationToken.None);
+
+                // Assert - Should skip empty part 1 and read from part 2
+                Assert.AreEqual(100, bytesRead);
+                Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch(testData2, readBuffer, 0, 100));
+                Assert.AreEqual(3, manager.NextExpectedPartNumber); // Advanced past both parts
+            }
+            finally
+            {
+                manager.Dispose();
+            }
+        }
+
+        #endregion
+
+        #region ReleaseBufferSpace Tests
+
+        [TestMethod]
+        public void ReleaseBufferSpace_IncreasesAvailableSlots()
+        {
+            // Arrange
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(maxInMemoryParts: 1);
+            var manager = new PartBufferManager(config);
+
+            try
+            {
+                // Take the slot
+                var task1 = manager.WaitForBufferSpaceAsync(CancellationToken.None);
+                Assert.IsTrue(task1.IsCompleted);
+
+                // Try to take another (should block)
+                var task2 = manager.WaitForBufferSpaceAsync(CancellationToken.None);
+                Assert.IsFalse(task2.IsCompleted); // Would block
+
+                // Act - Release space
+                manager.ReleaseBufferSpace();
+
+                // Wait briefly for the release to take effect
+                Task.Delay(50).Wait();
+
+                // Assert - Second wait should now complete
+                Assert.IsTrue(task2.IsCompleted || task2.Wait(100));
+            }
+            finally
+            {
+                manager.Dispose();
+            }
+        }
+
+        [TestMethod]
+        [ExpectedException(typeof(ObjectDisposedException))]
+        public void ReleaseBufferSpace_AfterDispose_ThrowsObjectDisposedException()
+        {
+            // Arrange
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var manager = new PartBufferManager(config);
+            manager.Dispose();
+
+            // Act
+            manager.ReleaseBufferSpace();
+
+            // Assert - ExpectedException
+        }
+
+        #endregion
+
+        #region MarkDownloadComplete Tests
+
+        [TestMethod]
+        public async Task MarkDownloadComplete_WithNullException_SignalsSuccess()
+        {
+            // Arrange
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var manager = new PartBufferManager(config);
+
+            try
+            {
+                // Act
+                manager.MarkDownloadComplete(null);
+
+                // Assert - Reading should return 0 (EOF)
+                byte[] readBuffer = new byte[512];
+                int bytesRead = await manager.ReadAsync(readBuffer, 0, 512, CancellationToken.None);
+                Assert.AreEqual(0, bytesRead);
+            }
+            finally
+            {
+                manager.Dispose();
+            }
+        }
+
+        [TestMethod]
+        public async Task MarkDownloadComplete_WithException_StoresException()
+        {
+            // Arrange
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var manager = new PartBufferManager(config);
+            var testException = new Exception("Test exception");
+
+            try
+            {
+                // Act
+                manager.MarkDownloadComplete(testException);
+
+                // Assert - Reading should throw
+                byte[] readBuffer = new byte[512];
+                var ex = await Assert.ThrowsExceptionAsync<InvalidOperationException>(async () =>
+                {
+                    await manager.ReadAsync(readBuffer, 0, 512, CancellationToken.None);
+                });
+
+                Assert.IsNotNull(ex.InnerException);
+                Assert.AreEqual(testException, ex.InnerException);
+            }
+            finally
+            {
+                manager.Dispose();
+            }
+        }
+
+        [TestMethod]
+        public async Task MarkDownloadComplete_SignalsWaitingReads()
+        {
+            // Arrange
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var manager = new PartBufferManager(config);
+
+            try
+            {
+                // Start reading before download is complete
+                var readTask = Task.Run(async () =>
+                {
+                    byte[] readBuffer = new byte[512];
+                    return await manager.ReadAsync(readBuffer, 0, 512, CancellationToken.None);
+                });
+
+                // Give read task time to start waiting
+ await Task.Delay(100); + Assert.IsFalse(readTask.IsCompleted); + + // Mark download complete + manager.MarkDownloadComplete(null); + + // Assert - Read should complete with 0 bytes + int bytesRead = await readTask; + Assert.AreEqual(0, bytesRead); + } + finally + { + manager.Dispose(); + } + } + + #endregion + + #region Disposal Tests + + [TestMethod] + public void Dispose_DisposesAllDataSources() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + byte[] testBuffer = ArrayPool.Shared.Rent(512); + var partBuffer = new StreamPartBuffer(1, testBuffer, 512); + manager.AddBufferAsync(partBuffer, CancellationToken.None).Wait(); + + // Act + manager.Dispose(); + + // Assert - The underlying part buffer should be disposed + Assert.IsNull(partBuffer.ArrayPoolBuffer); + } + + [TestMethod] + public void Dispose_ClearsCollection() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + byte[] testBuffer = ArrayPool.Shared.Rent(512); + var partBuffer = new StreamPartBuffer(1, testBuffer, 512); + manager.AddBufferAsync(partBuffer, CancellationToken.None).Wait(); + + // Act + manager.Dispose(); + + // Assert - Should not throw (collection cleared) + // Further operations should throw ObjectDisposedException + } + + [TestMethod] + public void Dispose_MultipleCalls_IsIdempotent() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + // Act - Dispose multiple times + manager.Dispose(); + manager.Dispose(); + manager.Dispose(); + + // Assert - Should not throw + } + + [TestMethod] + [ExpectedException(typeof(ObjectDisposedException))] + public async Task Operations_AfterDispose_ThrowObjectDisposedException() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + manager.Dispose(); + + // Act + await manager.WaitForBufferSpaceAsync(CancellationToken.None); + + // Assert - ExpectedException + } + + #endregion + } +} diff --git a/sdk/test/Services/S3/UnitTests/Custom/StreamPartBufferTests.cs b/sdk/test/Services/S3/UnitTests/Custom/StreamPartBufferTests.cs new file mode 100644 index 000000000000..2ddde6d48238 --- /dev/null +++ b/sdk/test/Services/S3/UnitTests/Custom/StreamPartBufferTests.cs @@ -0,0 +1,396 @@ +using Amazon.S3.Transfer.Internal; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using System; +using System.Buffers; + +namespace AWSSDK.UnitTests +{ + /// + /// Unit tests for StreamPartBuffer class. + /// Tests ArrayPool buffer management and position tracking. 
+ /// + [TestClass] + public class StreamPartBufferTests + { + #region Creation Tests + + [TestMethod] + public void Create_WithValidParameters_CreatesBuffer() + { + // Arrange + int partNumber = 1; + int capacity = 1024; + int actualLength = 512; + + // Act + var partBuffer = StreamPartBuffer.Create(partNumber, capacity); + + try + { + // Simulate writing data + partBuffer.SetLength(actualLength); + + // Assert + Assert.AreEqual(partNumber, partBuffer.PartNumber); + Assert.IsNotNull(partBuffer.ArrayPoolBuffer); + Assert.IsTrue(partBuffer.ArrayPoolBuffer.Length >= capacity); // ArrayPool may return larger + Assert.AreEqual(actualLength, partBuffer.Length); + Assert.AreEqual(0, partBuffer.CurrentPosition); + Assert.AreEqual(actualLength, partBuffer.RemainingBytes); + } + finally + { + partBuffer.Dispose(); + } + } + + [TestMethod] + public void Create_InitializesWithZeroLength() + { + // Arrange + int partNumber = 2; + int capacity = 2048; + + // Act + var partBuffer = StreamPartBuffer.Create(partNumber, capacity); + + try + { + // Assert - Length should be 0 until SetLength is called + Assert.AreEqual(partNumber, partBuffer.PartNumber); + Assert.IsNotNull(partBuffer.ArrayPoolBuffer); + Assert.AreEqual(0, partBuffer.Length); + Assert.AreEqual(0, partBuffer.CurrentPosition); + Assert.AreEqual(0, partBuffer.RemainingBytes); + } + finally + { + partBuffer.Dispose(); + } + } + + #endregion + + #region Property Tests + + [TestMethod] + public void RemainingBytes_ReturnsCorrectValue() + { + // Arrange + var partBuffer = StreamPartBuffer.Create(1, 1024); + partBuffer.SetLength(500); + + try + { + // Act & Assert - At start + Assert.AreEqual(500, partBuffer.RemainingBytes); + + // Act & Assert - After reading some bytes + partBuffer.CurrentPosition = 100; + Assert.AreEqual(400, partBuffer.RemainingBytes); + + // Act & Assert - At end + partBuffer.CurrentPosition = 500; + Assert.AreEqual(0, partBuffer.RemainingBytes); + } + finally + { + partBuffer.Dispose(); + } + } + + [TestMethod] + public void Length_ReturnsCorrectValue() + { + // Arrange + int actualLength = 1000; + var partBuffer = StreamPartBuffer.Create(1, 2048); + partBuffer.SetLength(actualLength); + + try + { + // Act & Assert + Assert.AreEqual(actualLength, partBuffer.Length); + } + finally + { + partBuffer.Dispose(); + } + } + + [TestMethod] + public void CurrentPosition_CanBeUpdated() + { + // Arrange + var partBuffer = StreamPartBuffer.Create(1, 1024); + partBuffer.SetLength(500); + + try + { + // Act + partBuffer.CurrentPosition = 250; + + // Assert + Assert.AreEqual(250, partBuffer.CurrentPosition); + } + finally + { + partBuffer.Dispose(); + } + } + + #endregion + + #region Reading Position Tests + + [TestMethod] + public void CurrentPosition_AfterReading_UpdatesCorrectly() + { + // Arrange + var partBuffer = StreamPartBuffer.Create(1, 1024); + partBuffer.SetLength(500); + + try + { + // Simulate reading 100 bytes + partBuffer.CurrentPosition += 100; + Assert.AreEqual(100, partBuffer.CurrentPosition); + Assert.AreEqual(400, partBuffer.RemainingBytes); + + // Simulate reading another 150 bytes + partBuffer.CurrentPosition += 150; + Assert.AreEqual(250, partBuffer.CurrentPosition); + Assert.AreEqual(250, partBuffer.RemainingBytes); + } + finally + { + partBuffer.Dispose(); + } + } + + [TestMethod] + public void RemainingBytes_WhenFullyRead_ReturnsZero() + { + // Arrange + var partBuffer = StreamPartBuffer.Create(1, 1024); + partBuffer.SetLength(500); + + try + { + // Act - Read all bytes + partBuffer.CurrentPosition = 500; + + // 
Assert + Assert.AreEqual(0, partBuffer.RemainingBytes); + } + finally + { + partBuffer.Dispose(); + } + } + + #endregion + + #region SetLength Tests + + [TestMethod] + public void SetLength_WithValidLength_SetsCorrectly() + { + // Arrange + var partBuffer = StreamPartBuffer.Create(1, 1024); + + // Act + partBuffer.SetLength(500); + + try + { + // Assert + Assert.AreEqual(500, partBuffer.Length); + } + finally + { + partBuffer.Dispose(); + } + } + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public void SetLength_CalledTwice_ThrowsException() + { + // Arrange + var partBuffer = StreamPartBuffer.Create(1, 1024); + partBuffer.SetLength(500); + + try + { + // Act - Try to set length again + partBuffer.SetLength(600); + } + finally + { + partBuffer.Dispose(); + } + } + + [TestMethod] + [ExpectedException(typeof(ArgumentOutOfRangeException))] + public void SetLength_WithNegativeLength_ThrowsException() + { + // Arrange + var partBuffer = StreamPartBuffer.Create(1, 1024); + + try + { + // Act + partBuffer.SetLength(-1); + } + finally + { + partBuffer.Dispose(); + } + } + + [TestMethod] + [ExpectedException(typeof(ArgumentOutOfRangeException))] + public void SetLength_ExceedsBufferCapacity_ThrowsException() + { + // Arrange + var partBuffer = StreamPartBuffer.Create(1, 1024); + + try + { + // Act - Try to set length larger than buffer capacity + partBuffer.SetLength(10000); + } + finally + { + partBuffer.Dispose(); + } + } + + #endregion + + #region Disposal Tests + + [TestMethod] + public void Dispose_ReturnsBufferToArrayPool() + { + // Arrange + var partBuffer = StreamPartBuffer.Create(1, 1024); + partBuffer.SetLength(500); + + // Act + partBuffer.Dispose(); + + // Assert - Buffer should be returned (verified by checking it's nulled) + Assert.IsNull(partBuffer.ArrayPoolBuffer); + } + + [TestMethod] + public void Dispose_MultipleCalls_IsIdempotent() + { + // Arrange + var partBuffer = StreamPartBuffer.Create(1, 1024); + partBuffer.SetLength(500); + + // Act - Dispose multiple times + partBuffer.Dispose(); + partBuffer.Dispose(); + partBuffer.Dispose(); + + // Assert - Should not throw + Assert.IsNull(partBuffer.ArrayPoolBuffer); + } + + [TestMethod] + public void Dispose_SetsArrayPoolBufferToNull() + { + // Arrange + var partBuffer = StreamPartBuffer.Create(1, 1024); + partBuffer.SetLength(500); + + // Act + partBuffer.Dispose(); + + // Assert + Assert.IsNull(partBuffer.ArrayPoolBuffer); + } + + #endregion + + #region Edge Cases + + [TestMethod] + public void Constructor_WithEmptyBuffer_HandlesCorrectly() + { + // Arrange + byte[] testBuffer = ArrayPool.Shared.Rent(1024); + var partBuffer = new StreamPartBuffer(1, testBuffer, 0); + + try + { + // Assert + Assert.AreEqual(0, partBuffer.Length); + Assert.AreEqual(0, partBuffer.RemainingBytes); + Assert.AreEqual(0, partBuffer.CurrentPosition); + } + finally + { + partBuffer.Dispose(); + } + } + + [TestMethod] + public void RemainingBytes_WhenPositionBeyondLength_ReturnsZero() + { + // Arrange + byte[] testBuffer = ArrayPool.Shared.Rent(1024); + var partBuffer = new StreamPartBuffer(1, testBuffer, 500); + + try + { + // Act - Position beyond actual length + partBuffer.CurrentPosition = 600; + + // Assert - RemainingBytes uses Math.Max(0, ...) 
to prevent negative + Assert.AreEqual(0, partBuffer.RemainingBytes); + } + finally + { + partBuffer.Dispose(); + } + } + + #endregion + + #region ToString Tests + + [TestMethod] + public void ToString_ReturnsExpectedFormat() + { + // Arrange + byte[] testBuffer = ArrayPool.Shared.Rent(1024); + var partBuffer = new StreamPartBuffer(3, testBuffer, 500); + + try + { + partBuffer.CurrentPosition = 100; + + // Act + string result = partBuffer.ToString(); + + // Assert - Verify format contains key information + Assert.IsTrue(result.Contains("Part=3")); + Assert.IsTrue(result.Contains("500 bytes")); + Assert.IsTrue(result.Contains("pos=100")); + Assert.IsTrue(result.Contains("remaining=400")); + } + finally + { + partBuffer.Dispose(); + } + } + + #endregion + } +} From 2e268c0c9179d845225bca3d08ae97d4d2b818e0 Mon Sep 17 00:00:00 2001 From: Garrett Beatty Date: Fri, 28 Nov 2025 10:05:23 -0500 Subject: [PATCH 25/56] DownloadWithResponse with multipartdownload (#4136) --- .../9d07dc1e-d82d-4f94-8700-c7b57f872043.json | 11 + .../S3/Custom/Model/GetObjectResponse.cs | 86 +- .../Transfer/Internal/AtomicFileHandler.cs | 183 +++ .../Internal/BufferedPartDataHandler.cs | 78 +- .../Internal/FileDownloadConfiguration.cs | 67 ++ .../Transfer/Internal/FilePartDataHandler.cs | 221 ++++ .../Transfer/Internal/IPartDataHandler.cs | 11 +- .../Internal/MultipartDownloadCommand.cs | 115 ++ .../Internal/MultipartDownloadManager.cs | 65 +- .../_async/MultipartDownloadCommand.async.cs | 124 ++ .../OpenStreamWithResponseCommand.async.cs | 10 +- .../Transfer/_async/ITransferUtility.async.cs | 113 ++ .../Transfer/_async/TransferUtility.async.cs | 29 +- .../_bcl+netstandard/ITransferUtility.sync.cs | 114 ++ .../_bcl+netstandard/TransferUtility.sync.cs | 28 + .../S3/Custom/Util/ContentRangeParser.cs | 90 ++ ...ransferUtilityDownloadWithResponseTests.cs | 692 +++++++++++ .../Custom/AtomicFileHandlerTests.cs | 670 +++++++++++ .../Custom/BufferedPartDataHandlerTests.cs | 14 +- .../Custom/ContentRangeParserTests.cs | 381 ++++++ .../Custom/FileDownloadConfigurationTests.cs | 334 ++++++ .../FilePartDataHandlerConcurrencyTests.cs | 367 ++++++ .../Custom/FilePartDataHandlerTests.cs | 1017 +++++++++++++++++ .../Custom/MultipartDownloadCommandTests.cs | 796 +++++++++++++ .../Custom/MultipartDownloadTestHelpers.cs | 245 ++++ .../OpenStreamWithResponseCommandTests.cs | 28 + 26 files changed, 5759 insertions(+), 130 deletions(-) create mode 100644 generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872043.json create mode 100644 sdk/src/Services/S3/Custom/Transfer/Internal/AtomicFileHandler.cs create mode 100644 sdk/src/Services/S3/Custom/Transfer/Internal/FileDownloadConfiguration.cs create mode 100644 sdk/src/Services/S3/Custom/Transfer/Internal/FilePartDataHandler.cs create mode 100644 sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadCommand.cs create mode 100644 sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartDownloadCommand.async.cs create mode 100644 sdk/src/Services/S3/Custom/Util/ContentRangeParser.cs create mode 100644 sdk/test/Services/S3/IntegrationTests/TransferUtilityDownloadWithResponseTests.cs create mode 100644 sdk/test/Services/S3/UnitTests/Custom/AtomicFileHandlerTests.cs create mode 100644 sdk/test/Services/S3/UnitTests/Custom/ContentRangeParserTests.cs create mode 100644 sdk/test/Services/S3/UnitTests/Custom/FileDownloadConfigurationTests.cs create mode 100644 sdk/test/Services/S3/UnitTests/Custom/FilePartDataHandlerConcurrencyTests.cs create mode 100644 
sdk/test/Services/S3/UnitTests/Custom/FilePartDataHandlerTests.cs
 create mode 100644 sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadCommandTests.cs

diff --git a/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872043.json b/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872043.json
new file mode 100644
index 000000000000..6793c5b842ce
--- /dev/null
+++ b/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872043.json
@@ -0,0 +1,11 @@
+{
+  "services": [
+    {
+      "serviceName": "S3",
+      "type": "minor",
+      "changeLogMessages": [
+        "Created new DownloadWithResponseAsync method on the Amazon.S3.Transfer.TransferUtility class. The new operation downloads parts of the S3 object to a file in parallel for improved performance."
+      ]
+    }
+  ]
+}
\ No newline at end of file
diff --git a/sdk/src/Services/S3/Custom/Model/GetObjectResponse.cs b/sdk/src/Services/S3/Custom/Model/GetObjectResponse.cs
index bf06d655ed56..a4155565b9fe 100644
--- a/sdk/src/Services/S3/Custom/Model/GetObjectResponse.cs
+++ b/sdk/src/Services/S3/Custom/Model/GetObjectResponse.cs
@@ -903,6 +903,59 @@ private void ValidateWrittenStreamSize(long bytesWritten)
         }
 
 #if BCL || NETSTANDARD
+        /// <summary>
+        /// Copies data from ResponseStream to destination stream with progress tracking and validation.
+        /// Internal method to enable reuse across different download scenarios.
+        /// </summary>
+        /// <param name="destinationStream">Stream to write data to</param>
+        /// <param name="filePath">File path for progress event reporting (can be null)</param>
+        /// <param name="bufferSize">Buffer size for reading/writing operations</param>
+        /// <param name="cancellationToken">Cancellation token</param>
+        /// <param name="validateSize">Whether to validate copied bytes match ContentLength</param>
+        internal async System.Threading.Tasks.Task WriteResponseStreamAsync(
+            Stream destinationStream,
+            string filePath,
+            int bufferSize,
+            System.Threading.CancellationToken cancellationToken,
+            bool validateSize = true)
+        {
+            long current = 0;
+#if NETSTANDARD
+            Stream stream = this.ResponseStream;
+#else
+            Stream stream = new BufferedStream(this.ResponseStream);
+#endif
+            byte[] buffer = new byte[bufferSize];
+            int bytesRead = 0;
+            long totalIncrementTransferred = 0;
+
+            while ((bytesRead = await stream.ReadAsync(buffer, 0, buffer.Length, cancellationToken)
+                .ConfigureAwait(continueOnCapturedContext: false)) > 0)
+            {
+                cancellationToken.ThrowIfCancellationRequested();
+
+                await destinationStream.WriteAsync(buffer, 0, bytesRead, cancellationToken)
+                    .ConfigureAwait(continueOnCapturedContext: false);
+                current += bytesRead;
+                totalIncrementTransferred += bytesRead;
+
+                if (totalIncrementTransferred >= AWSSDKUtils.DefaultProgressUpdateInterval)
+                {
+                    this.OnRaiseProgressEvent(filePath, totalIncrementTransferred, current, this.ContentLength, completed: false);
+                    totalIncrementTransferred = 0;
+                }
+            }
+
+            if (validateSize)
+            {
+                ValidateWrittenStreamSize(current);
+            }
+
+            // Encrypted objects may have size smaller than the total amount of data transferred due to padding.
+            // Instead of changing the file size or the total downloaded size, pass a flag that indicates transfer is complete.
+            this.OnRaiseProgressEvent(filePath, totalIncrementTransferred, current, this.ContentLength, completed: true);
+        }
+
         /// <summary>
         /// Writes the content of the ResponseStream to a file indicated by the filePath argument.
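        /// A typical call (illustrative; the client, bucket, and key names below are placeholders):
        /// using (var response = await s3Client.GetObjectAsync(bucketName, key))
        ///     await response.WriteResponseStreamToFileAsync(filePath, append: false, cancellationToken);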
        /// </summary>
@@ -923,37 +976,8 @@ public async System.Threading.Tasks.Task WriteResponseStreamToFileAsync(string f
 
             try
             {
-                long current = 0;
-#if NETSTANDARD
-                Stream stream = this.ResponseStream;
-#else
-                Stream stream = new BufferedStream(this.ResponseStream);
-#endif
-                byte[] buffer = new byte[S3Constants.DefaultBufferSize];
-                int bytesRead = 0;
-                long totalIncrementTransferred = 0;
-                while ((bytesRead = await stream.ReadAsync(buffer, 0, buffer.Length, cancellationToken)
-                    .ConfigureAwait(continueOnCapturedContext: false)) > 0)
-                {
-                    cancellationToken.ThrowIfCancellationRequested();
-
-                    await downloadStream.WriteAsync(buffer, 0, bytesRead, cancellationToken)
-                        .ConfigureAwait(continueOnCapturedContext: false);
-                    current += bytesRead;
-                    totalIncrementTransferred += bytesRead;
-
-                    if (totalIncrementTransferred >= AWSSDKUtils.DefaultProgressUpdateInterval)
-                    {
-                        this.OnRaiseProgressEvent(filePath, totalIncrementTransferred, current, this.ContentLength, completed:false);
-                        totalIncrementTransferred = 0;
-                    }
-                }
-
-                ValidateWrittenStreamSize(current);
-
-                // Encrypted objects may have size smaller than the total amount of data trasnfered due to padding.
-                // Instead of changing the file size or the total downloaded size, pass a flag that indicate that the transfer is complete.
-                this.OnRaiseProgressEvent(filePath, totalIncrementTransferred, current, this.ContentLength, completed:true);
+                await WriteResponseStreamAsync(downloadStream, filePath, S3Constants.DefaultBufferSize, cancellationToken, validateSize: true)
+                    .ConfigureAwait(continueOnCapturedContext: false);
             }
             finally
             {
diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/AtomicFileHandler.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/AtomicFileHandler.cs
new file mode 100644
index 000000000000..5c9f6909c92a
--- /dev/null
+++ b/sdk/src/Services/S3/Custom/Transfer/Internal/AtomicFileHandler.cs
@@ -0,0 +1,183 @@
+/*******************************************************************************
+ *  Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *  Licensed under the Apache License, Version 2.0 (the "License"). You may not use
+ *  this file except in compliance with the License. A copy of the License is located at
+ *
+ *  http://aws.amazon.com/apache2.0
+ *
+ *  or in the "license" file accompanying this file.
+ *  This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+ *  CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ *  specific language governing permissions and limitations under the License.
+ * *****************************************************************************
+ *    __  _    _  ___
+ *   ( )( \/\/ )/ __)
+ *   /__\ \    / \__ \
+ *  (_)(_) \/\/  (___/
+ *
+ *  AWS SDK for .NET
+ *  API Version: 2006-03-01
+ *
+ */
+using System;
+using System.IO;
+using System.Security.Cryptography;
+
+namespace Amazon.S3.Transfer.Internal
+{
+    /// <summary>
+    /// Handles atomic file operations for multipart downloads using the SEP-compliant temporary file pattern.
+    /// Creates .s3tmp.{uniqueId} files and ensures atomic commits to prevent partial file corruption.
+    /// </summary>
+    internal class AtomicFileHandler : IDisposable
+    {
+        private string _tempFilePath;
+        private bool _disposed = false;
+
+        /// <summary>
+        /// Creates a temporary file with a unique identifier for atomic operations.
+        /// Pattern: {destinationPath}.s3tmp.{8-char-unique-id}
+        /// Uses FileMode.CreateNew for atomic file creation (no race condition).
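+        /// For example, a destination of "movie.mp4" may produce a temporary file named
+        /// "movie.mp4.s3tmp.K7Q2XZ4M" (the 8-character ID shown here is illustrative).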
+        /// </summary>
+        public string CreateTemporaryFile(string destinationPath)
+        {
+            if (string.IsNullOrEmpty(destinationPath))
+                throw new ArgumentException("Destination path cannot be null or empty", nameof(destinationPath));
+
+            // Create directory if it doesn't exist (Directory.CreateDirectory is idempotent)
+            var directory = Path.GetDirectoryName(destinationPath);
+            if (!string.IsNullOrEmpty(directory))
+            {
+                Directory.CreateDirectory(directory);
+            }
+
+            // Try up to 100 times to create unique file atomically
+            for (int attempt = 0; attempt < 100; attempt++)
+            {
+                var uniqueId = GenerateRandomId(8);
+                var tempPath = $"{destinationPath}.s3tmp.{uniqueId}";
+
+                try
+                {
+                    // FileMode.CreateNew fails atomically if file exists - no race condition
+                    using (var stream = new FileStream(tempPath, FileMode.CreateNew, FileAccess.Write))
+                    {
+                        // File created successfully - immediately close it
+                    }
+
+                    _tempFilePath = tempPath;
+                    return tempPath;
+                }
+                catch (IOException) when (attempt < 99)
+                {
+                    // File exists, try again with new ID
+                    continue;
+                }
+            }
+
+            throw new InvalidOperationException("Unable to generate unique temporary file name after 100 attempts");
+        }
+
+        /// <summary>
+        /// Atomically commits the temporary file to the final destination.
+        /// Uses File.Replace for atomic replacement when the destination exists, or File.Move for new files.
+        /// This prevents data loss if the process crashes during commit.
+        /// </summary>
+        public void CommitFile(string tempPath, string destinationPath)
+        {
+            if (string.IsNullOrEmpty(tempPath))
+                throw new ArgumentException("Temp path cannot be null or empty", nameof(tempPath));
+            if (string.IsNullOrEmpty(destinationPath))
+                throw new ArgumentException("Destination path cannot be null or empty", nameof(destinationPath));
+
+            if (!File.Exists(tempPath))
+                throw new FileNotFoundException($"Temporary file not found: {tempPath}");
+
+            try
+            {
+                // Use File.Replace for atomic replacement when overwriting existing file
+                // This prevents data loss if process crashes between delete and move operations
+                // File.Replace is atomic on Windows (ReplaceFile API) and Unix (rename syscall)
+                if (File.Exists(destinationPath))
+                {
+                    File.Replace(tempPath, destinationPath, null);
+                }
+                else
+                {
+                    // For new files, File.Move is sufficient and atomic on same volume
+                    File.Move(tempPath, destinationPath);
+                }
+
+                if (_tempFilePath == tempPath)
+                    _tempFilePath = null; // Successfully committed
+            }
+            catch (Exception ex)
+            {
+                throw new InvalidOperationException($"Failed to commit temporary file {tempPath} to {destinationPath}", ex);
+            }
+        }
+
+        /// <summary>
+        /// Cleans up the temporary file in case of failure or cancellation.
+        /// Safe to call multiple times - File.Delete() is idempotent (no-op if file doesn't exist).
+        /// </summary>
+        public void CleanupOnFailure(string tempPath = null)
+        {
+            var pathToClean = string.IsNullOrEmpty(tempPath) ? _tempFilePath : tempPath;
+
+            if (string.IsNullOrEmpty(pathToClean))
+                return;
+
+            try
+            {
+                // File.Delete() is idempotent - doesn't throw if file doesn't exist
+                File.Delete(pathToClean);
+
+                if (_tempFilePath == pathToClean)
+                    _tempFilePath = null;
+            }
+            catch (IOException)
+            {
+                // Log warning but don't throw - cleanup is best effort
+                // In production, this would use proper logging infrastructure
+            }
+            catch (UnauthorizedAccessException)
+            {
+                // Log warning but don't throw - cleanup is best effort
+            }
+        }
+
+        ///
+        /// Generates a cryptographically secure random identifier of the specified length.
+        /// Uses base32 encoding to avoid filesystem-problematic characters.
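+        /// With a length of 8 this produces IDs such as "AB3K7Q2M" (illustrative),
+        /// drawn from the RFC 4648 alphabet A-Z and 2-7.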
+ /// + private string GenerateRandomId(int length) + { + const string base32Chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567"; // RFC 4648 base32 + + using (var rng = RandomNumberGenerator.Create()) + { + var bytes = new byte[length]; + rng.GetBytes(bytes); + + var result = new char[length]; + for (int i = 0; i < length; i++) + { + result[i] = base32Chars[bytes[i] % base32Chars.Length]; + } + + return new string(result); + } + } + + public void Dispose() + { + if (!_disposed) + { + // Cleanup any remaining temp file + CleanupOnFailure(); + _disposed = true; + } + } + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedPartDataHandler.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedPartDataHandler.cs index 9ce05f6e71f6..ae55c6c2422c 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedPartDataHandler.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedPartDataHandler.cs @@ -57,6 +57,12 @@ public BufferedPartDataHandler( _config = config ?? throw new ArgumentNullException(nameof(config)); } + public Task PrepareAsync(DownloadDiscoveryResult discoveryResult, CancellationToken cancellationToken) + { + // No preparation needed for buffered handler - buffers are created on demand + return Task.CompletedTask; + } + /// public async Task ProcessPartAsync( int partNumber, @@ -127,55 +133,39 @@ private async Task BufferPartFromResponseAsync( // Get reference to the buffer for writing var partBuffer = downloadedPart.ArrayPoolBuffer; - int totalRead = 0; - int chunkCount = 0; - - // Read response stream into buffer in chunks based on ContentLength. - // Example: For a 10MB part with 8KB BufferSize: - // - Loop 1: remainingBytes=10MB, readSize=8KB → reads 8KB at offset 0 - // - Loop 2: remainingBytes=9.992MB, readSize=8KB → reads 8KB at offset 8KB - // - ...continues until totalRead reaches 10MB (1,280 iterations) - while (totalRead < expectedBytes) + // Create a MemoryStream wrapper around the pooled buffer + // writable: true allows WriteResponseStreamAsync to write to it + // The MemoryStream starts at position 0 and can grow up to initialBufferSize + using (var memoryStream = new MemoryStream(partBuffer, 0, initialBufferSize, writable: true)) { - // Calculate how many bytes we still need to read - int remainingBytes = (int)(expectedBytes - totalRead); - - // Read in chunks up to BufferSize, but never exceed remaining bytes - int readSize = Math.Min(remainingBytes, _config.BufferSize); - - // Read directly into buffer at current position - int bytesRead = await response.ResponseStream.ReadAsync( - partBuffer, - totalRead, - readSize, - cancellationToken).ConfigureAwait(false); + Logger.DebugFormat("BufferedPartDataHandler: [Part {0}] Reading response stream into buffer", + partNumber); + + // Use GetObjectResponse's stream copy logic which includes: + // - Progress tracking with events + // - Size validation (ContentLength vs bytes read) + // - Buffered reading with proper chunk sizes + await response.WriteResponseStreamAsync( + memoryStream, + null, // destination identifier (not needed for memory stream) + _config.BufferSize, + cancellationToken, + validateSize: true) + .ConfigureAwait(false); - if (bytesRead == 0) - { - var errorMessage = $"Unexpected end of stream while downloading part {partNumber}. " + - $"Expected {expectedBytes} bytes but only received {totalRead} bytes. 
" + - $"This indicates a network error or S3 service issue."; - - Logger.Error(null, "BufferedPartDataHandler: [Part {0}] {1}", - partNumber, errorMessage); - - throw new IOException(errorMessage); - } + int totalRead = (int)memoryStream.Position; - totalRead += bytesRead; - chunkCount++; - } - - Logger.DebugFormat("BufferedPartDataHandler: [Part {0}] Read {1} bytes in {2} chunks from response stream", - partNumber, totalRead, chunkCount); + Logger.DebugFormat("BufferedPartDataHandler: [Part {0}] Read {1} bytes from response stream", + partNumber, totalRead); - // Set the length to reflect actual bytes read - downloadedPart.SetLength(totalRead); + // Set the length to reflect actual bytes read + downloadedPart.SetLength(totalRead); - if (totalRead != expectedBytes) - { - Logger.Error(null, "BufferedPartDataHandler: [Part {0}] Size mismatch - Expected {1} bytes, read {2} bytes", - partNumber, expectedBytes, totalRead); + if (totalRead != expectedBytes) + { + Logger.Error(null, "BufferedPartDataHandler: [Part {0}] Size mismatch - Expected {1} bytes, read {2} bytes", + partNumber, expectedBytes, totalRead); + } } return downloadedPart; diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/FileDownloadConfiguration.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/FileDownloadConfiguration.cs new file mode 100644 index 000000000000..2193fa860ca6 --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/FileDownloadConfiguration.cs @@ -0,0 +1,67 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ +using System; + +namespace Amazon.S3.Transfer.Internal +{ + /// + /// Configuration settings for file-based multipart downloads. + /// Extends base coordinator settings with file-specific parameters. + /// + internal class FileDownloadConfiguration : DownloadManagerConfiguration + { + /// + /// Buffer size for file I/O operations. + /// + public int BufferSize { get; set; } + + /// + /// Destination file path for the download. + /// + public string DestinationFilePath { get; set; } + + /// + /// Creates a FileDownloadConfiguration with the specified configuration values. + /// + /// Maximum concurrent HTTP requests for downloading parts. + /// Buffer size used for file I/O operations. + /// Target size for each part in bytes. + /// Destination file path for the download. + /// Thrown when any numeric parameter is less than or equal to 0. + /// Thrown when destinationFilePath is null or empty. 
+        public FileDownloadConfiguration(
+            int concurrentServiceRequests,
+            int bufferSize,
+            long targetPartSizeBytes,
+            string destinationFilePath)
+            : base(concurrentServiceRequests, targetPartSizeBytes)
+        {
+            if (bufferSize <= 0)
+                throw new ArgumentOutOfRangeException(nameof(bufferSize), "Must be greater than 0");
+            if (string.IsNullOrWhiteSpace(destinationFilePath))
+                throw new ArgumentException("Destination file path cannot be null or empty", nameof(destinationFilePath));
+
+            BufferSize = bufferSize;
+            DestinationFilePath = destinationFilePath;
+        }
+    }
+}
diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/FilePartDataHandler.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/FilePartDataHandler.cs
new file mode 100644
index 000000000000..74c046611dfe
--- /dev/null
+++ b/sdk/src/Services/S3/Custom/Transfer/Internal/FilePartDataHandler.cs
@@ -0,0 +1,221 @@
+/*******************************************************************************
+ *  Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *  Licensed under the Apache License, Version 2.0 (the "License"). You may not use
+ *  this file except in compliance with the License. A copy of the License is located at
+ *
+ *  http://aws.amazon.com/apache2.0
+ *
+ *  or in the "license" file accompanying this file.
+ *  This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+ *  CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ *  specific language governing permissions and limitations under the License.
+ * *****************************************************************************
+ *    __  _    _  ___
+ *   ( )( \/\/ )/ __)
+ *   /__\ \    / \__ \
+ *  (_)(_) \/\/  (___/
+ *
+ *  AWS SDK for .NET
+ *  API Version: 2006-03-01
+ *
+ */
+using System;
+using System.IO;
+using System.Threading;
+using System.Threading.Tasks;
+using Amazon.Runtime.Internal.Util;
+using Amazon.S3.Model;
+using Amazon.S3.Util;
+
+namespace Amazon.S3.Transfer.Internal
+{
+    /// <summary>
+    /// Writes downloaded parts directly to a file at specific offsets.
+    /// Supports concurrent writes from multiple parts for true parallel download to disk.
+    /// Uses temporary files with atomic commit for SEP compliance.
+    /// See <see cref="IPartDataHandler"/> for the contract this class implements.
+    /// </summary>
+    internal class FilePartDataHandler : IPartDataHandler
+    {
+        private readonly FileDownloadConfiguration _config;
+        private readonly AtomicFileHandler _fileHandler;
+        private string _tempFilePath;
+        private bool _disposed = false;
+
+        private Logger Logger
+        {
+            get { return Logger.GetLogger(typeof(TransferUtility)); }
+        }
+
+        public FilePartDataHandler(FileDownloadConfiguration config)
+        {
+            _config = config ??
throw new ArgumentNullException(nameof(config)); + _fileHandler = new AtomicFileHandler(); + } + + /// + public Task PrepareAsync(DownloadDiscoveryResult discoveryResult, CancellationToken cancellationToken) + { + // Create temporary file once during preparation phase + _tempFilePath = _fileHandler.CreateTemporaryFile(_config.DestinationFilePath); + + Logger.DebugFormat("FilePartDataHandler: Created temporary file for download"); + + return Task.CompletedTask; + } + + /// + public async Task ProcessPartAsync( + int partNumber, + GetObjectResponse response, + CancellationToken cancellationToken) + { + Logger.DebugFormat("FilePartDataHandler: [Part {0}] Starting to process part - ContentLength={1}", + partNumber, response.ContentLength); + + // Calculate offset for this part based on ContentRange or part number + long offset = GetPartOffset(response, partNumber); + + Logger.DebugFormat("FilePartDataHandler: [Part {0}] Calculated file offset={1}", + partNumber, offset); + + // Write part data to file at the calculated offset + await WritePartToFileAsync(offset, response, cancellationToken) + .ConfigureAwait(false); + + Logger.DebugFormat("FilePartDataHandler: [Part {0}] File write completed successfully", + partNumber); + } + + /// + public Task WaitForCapacityAsync(CancellationToken cancellationToken) + { + // No backpressure needed - OS handles concurrent file access + return Task.CompletedTask; + } + + /// + public void ReleaseCapacity() + { + // No-op + } + + /// + public void OnDownloadComplete(Exception exception) + { + if (exception == null) + { + // Success - commit temp file to final destination + Logger.DebugFormat("FilePartDataHandler: Download complete, committing temporary file to destination"); + + try + { + _fileHandler.CommitFile(_tempFilePath, _config.DestinationFilePath); + + Logger.DebugFormat("FilePartDataHandler: Successfully committed file to destination"); + } + catch (Exception commitException) + { + Logger.Error(commitException, "FilePartDataHandler: Failed to commit file to destination"); + + // Cleanup on commit failure + _fileHandler.CleanupOnFailure(); + throw new InvalidOperationException( + "Failed to commit downloaded file to final destination", commitException); + } + } + else + { + // Failure - cleanup temp file + Logger.Error(exception, "FilePartDataHandler: Download failed, cleaning up temporary file"); + + _fileHandler.CleanupOnFailure(); + } + } + + /// + public void Dispose() + { + if (!_disposed) + { + _fileHandler?.Dispose(); + _disposed = true; + } + } + + /// + /// Gets the file offset for writing a part based on the header. + /// + private long GetPartOffset(GetObjectResponse response, int partNumber) + { + // Parse offset from ContentRange header (works for both PART and RANGE strategies) + if (!string.IsNullOrEmpty(response.ContentRange)) + { + // Use centralized ContentRange parsing utility + return ContentRangeParser.GetStartByte(response.ContentRange); + } + + // For single-part downloads (especially empty objects), ContentRange may not be present + // S3 doesn't include ContentRange for simple GET requests without range headers + // In this case, the offset is always 0 since we're writing the entire response + if (partNumber == 1) + { + return 0; + } + + // ContentRange should be present for actual multipart downloads (part > 1) + throw new InvalidOperationException( + $"ContentRange header missing from part {partNumber} response. 
" + + $"Unable to determine file write offset."); + } + + /// + /// Writes part data from GetObjectResponse ResponseStream to the file at the specified offset. + /// + private async Task WritePartToFileAsync( + long offset, + GetObjectResponse response, + CancellationToken cancellationToken) + { + if (string.IsNullOrEmpty(_tempFilePath)) + throw new InvalidOperationException("Temporary file has not been created"); + + Logger.DebugFormat("FilePartDataHandler: Opening file for writing at offset {0} with BufferSize={1}", + offset, _config.BufferSize); + + // Open file with FileShare.Write to allow concurrent writes from other threads + using (var fileStream = new FileStream( + _tempFilePath, + FileMode.Open, // Open existing file + FileAccess.Write, + FileShare.Write, // Allow concurrent writes to different offsets + _config.BufferSize)) + { + // Seek to the correct offset for this part + fileStream.Seek(offset, SeekOrigin.Begin); + + Logger.DebugFormat("FilePartDataHandler: Writing {0} bytes to file at offset {1}", + response.ContentLength, offset); + + // Use GetObjectResponse's stream copy logic which includes: + // - Progress tracking with events + // - Size validation + // - Buffered reading + await response.WriteResponseStreamAsync( + fileStream, + null, + _config.BufferSize, + cancellationToken, + validateSize: true) + .ConfigureAwait(false); + + // Ensure data is written to disk + await fileStream.FlushAsync(cancellationToken) + .ConfigureAwait(false); + + Logger.DebugFormat("FilePartDataHandler: Successfully wrote {0} bytes at offset {1}", + response.ContentLength, offset); + } + } + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/IPartDataHandler.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/IPartDataHandler.cs index 63acd951a062..864a49acbaa7 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/IPartDataHandler.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/IPartDataHandler.cs @@ -32,7 +32,16 @@ namespace Amazon.S3.Transfer.Internal /// Enables separation of download orchestration from data handling (buffering, file writing, etc). ///
internal interface IPartDataHandler : IDisposable - { + { + /// + /// Prepare the handler for processing parts based on discovery result. + /// Called once before any parts are processed to perform initialization. + /// + /// Discovery result containing object metadata + /// Cancellation token + /// Task that completes when preparation is done + Task PrepareAsync(DownloadDiscoveryResult discoveryResult, CancellationToken cancellationToken); + /// /// Process a downloaded part from the GetObjectResponse. /// Implementation decides whether to buffer in memory, write to file, etc. diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadCommand.cs new file mode 100644 index 000000000000..820900621e80 --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadCommand.cs @@ -0,0 +1,115 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ +using System; +using Amazon.Runtime.Internal.Util; +using Amazon.S3.Model; +using Amazon.S3.Util; + +namespace Amazon.S3.Transfer.Internal +{ + /// + /// Command for downloading files using multipart download strategy. + /// Orchestrates the and + /// to perform concurrent part downloads with SEP compliance. + /// + internal partial class MultipartDownloadCommand : BaseCommand + { + private readonly IAmazonS3 _s3Client; + private readonly TransferUtilityDownloadRequest _request; + private readonly TransferUtilityConfig _config; + + private static Logger Logger + { + get + { + return Logger.GetLogger(typeof(TransferUtility)); + } + } + + /// + /// Initializes a new instance of the MultipartDownloadCommand class. + /// + /// The S3 client to use for downloads. + /// The download request containing configuration. + /// The TransferUtility configuration. + internal MultipartDownloadCommand(IAmazonS3 s3Client, TransferUtilityDownloadRequest request, TransferUtilityConfig config) + { + _s3Client = s3Client ?? throw new ArgumentNullException(nameof(s3Client)); + _request = request ?? throw new ArgumentNullException(nameof(request)); + _config = config ?? throw new ArgumentNullException(nameof(config)); + } + + /// + /// Validates the download request to ensure all required parameters are set. + /// + /// Thrown when required parameters are missing. 
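+        /// (BucketName and Key are always required; FilePath is additionally required on BCL targets.)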
+ private void ValidateRequest() + { + if (!_request.IsSetBucketName()) + { + throw new InvalidOperationException("The BucketName specified is null or empty!"); + } + + if (!_request.IsSetKey()) + { + throw new InvalidOperationException("The Key specified is null or empty!"); + } + +#if BCL + if (!_request.IsSetFilePath()) + { + throw new InvalidOperationException("The FilePath specified is null or empty!"); + } +#endif + } + + /// + /// Creates a FileDownloadConfiguration from the request and S3 client configuration. + /// + /// A configured FileDownloadConfiguration instance. + private FileDownloadConfiguration CreateConfiguration() + { + // Use concurrent service requests from config + int concurrentRequests = _config.ConcurrentServiceRequests; + + // Determine target part size + // Use request setting if available, otherwise use 8MB default (matching BufferedMultipartStream) + long targetPartSize = _request.IsSetPartSize() + ? _request.PartSize + : S3Constants.DefaultPartSize; // 8MB default + + // Use S3 client buffer size for I/O operations + int bufferSize = _s3Client.Config.BufferSize; + + Logger.DebugFormat("MultipartDownloadCommand: Creating configuration - PartSizeFromRequest={0}, UsingDefaultPartSize={1}", + _request.IsSetPartSize() ? _request.PartSize.ToString() : "Not Set", + !_request.IsSetPartSize()); + + return new FileDownloadConfiguration( + concurrentRequests, + bufferSize, + targetPartSize, + _request.FilePath + ); + } + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs index a22700560903..e38bf58035ce 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs @@ -29,6 +29,7 @@ using Amazon.Runtime; using Amazon.Runtime.Internal.Util; using Amazon.S3.Model; +using Amazon.S3.Util; namespace Amazon.S3.Transfer.Internal { @@ -58,6 +59,14 @@ private Logger Logger get { return Logger.GetLogger(typeof(TransferUtility)); } } + /// + /// Task that completes when all downloads finish (successfully or with error). + /// For file-based downloads, await this before returning to ensure file is committed. + /// For stream-based downloads, this can be ignored as the consumer naturally waits. + /// Returns a completed task if downloads haven't started or completed synchronously. + /// + public Task DownloadCompletionTask => _downloadCompletionTask ?? Task.CompletedTask; + /// /// Initializes a new instance of the class. /// @@ -87,13 +96,6 @@ public Exception DownloadException } } - /// - /// Gets a task that completes when all download tasks have finished. - /// Returns a completed task for single-part downloads. - /// For multipart downloads, this task can be awaited to observe exceptions from background downloads. - /// - public Task DownloadCompletionTask => _downloadCompletionTask ?? 
Task.CompletedTask; - /// public async Task DiscoverDownloadStrategyAsync(CancellationToken cancellationToken) { @@ -146,6 +148,9 @@ public async Task StartDownloadsAsync(DownloadDiscoveryResult discoveryResult, C try { + // Prepare the data handler (e.g., create temp files for file-based downloads) + await _dataHandler.PrepareAsync(discoveryResult, cancellationToken).ConfigureAwait(false); + // Process Part 1 from InitialResponse (applies to both single-part and multipart) Logger.DebugFormat("MultipartDownloadManager: Buffering Part 1 from discovery response"); await _dataHandler.ProcessPartAsync(1, discoveryResult.InitialResponse, cancellationToken).ConfigureAwait(false); @@ -345,6 +350,9 @@ private async Task CreateDownloadTaskAsync(int partNumber, long objectSize, Canc private async Task DiscoverUsingPartStrategyAsync(CancellationToken cancellationToken) { + // Check for cancellation before making any S3 calls + cancellationToken.ThrowIfCancellationRequested(); + // SEP Part GET Step 1: "create a new GetObject request copying all fields in DownloadRequest. // Set partNumber to 1." var firstPartRequest = CreateGetObjectRequest(); @@ -353,6 +361,9 @@ private async Task DiscoverUsingPartStrategyAsync(Cance // SEP Part GET Step 2: "send the request and wait for the response in a non-blocking fashion" var firstPartResponse = await _s3Client.GetObjectAsync(firstPartRequest, cancellationToken).ConfigureAwait(false); + if (firstPartResponse == null) + throw new InvalidOperationException("Failed to retrieve object from S3"); + // SEP Part GET Step 3: Save ETag for later IfMatch validation in subsequent requests _savedETag = firstPartResponse.ETag; @@ -397,6 +408,9 @@ private async Task DiscoverUsingPartStrategyAsync(Cance private async Task DiscoverUsingRangeStrategyAsync(CancellationToken cancellationToken) { + // Check for cancellation before making any S3 calls + cancellationToken.ThrowIfCancellationRequested(); + // Get target part size for RANGE strategy (already set in config from request or default) var targetPartSize = _config.TargetPartSizeBytes; @@ -408,6 +422,10 @@ private async Task DiscoverUsingRangeStrategyAsync(Canc // SEP Ranged GET Step 2: "send the request and wait for the response in a non-blocking fashion" var firstRangeResponse = await _s3Client.GetObjectAsync(firstRangeRequest, cancellationToken).ConfigureAwait(false); + // Defensive null check + if (firstRangeResponse == null) + throw new InvalidOperationException("Failed to retrieve object from S3"); + // SEP Ranged GET Step 5: "save Etag from the response to a variable" // (for IfMatch validation in subsequent requests) _savedETag = firstRangeResponse.ETag; @@ -498,34 +516,14 @@ private GetObjectRequest CreateGetObjectRequest() internal (long startByte, long endByte, long totalSize) ParseContentRange(string contentRange) { - if (string.IsNullOrEmpty(contentRange)) - throw new InvalidOperationException("Content-Range header is missing"); - - // Format: "bytes {start}-{end}/{total-size}" - var parts = contentRange.Replace("bytes ", "").Split('/'); - if (parts.Length != 2) - throw new InvalidOperationException($"Invalid ContentRange format: {contentRange}"); - - // Parse byte range - var rangeParts = parts[0].Split('-'); - if (rangeParts.Length != 2 || - !long.TryParse(rangeParts[0], out var startByte) || - !long.TryParse(rangeParts[1], out var endByte)) - throw new InvalidOperationException($"Unable to parse ContentRange byte range: {contentRange}"); - - // Parse total size - S3 always returns exact sizes, never wildcards 
- if (parts[1] == "*") - throw new InvalidOperationException($"Unexpected wildcard in ContentRange total size: {contentRange}. S3 always returns exact object sizes."); - if (!long.TryParse(parts[1], out var totalSize)) - throw new InvalidOperationException($"Unable to parse ContentRange total size: {contentRange}"); - - return (startByte, endByte, totalSize); + // Delegate to centralized ContentRange parsing utility + return ContentRangeParser.Parse(contentRange); } internal long ExtractTotalSizeFromContentRange(string contentRange) { - var (_, _, totalSize) = ParseContentRange(contentRange); - return totalSize; + // Delegate to centralized ContentRange parsing utility + return ContentRangeParser.GetTotalSize(contentRange); } internal void ValidateContentRange(GetObjectResponse response, int partNumber, long objectSize) @@ -553,11 +551,6 @@ internal void ValidateContentRange(GetObjectResponse response, int partNumber, l $"Actual: bytes {actualStartByte}-{actualEndByte}"); } } - - // TODO in future for file based download it also says - // Applicable to destinations to which the SDK writes parts parallelly, e.g., a file - // the content range of the response aligns with the starting offset of the destination to which the SDK writes the part. For example, given a part with content range of bytes 8388608-16777215/33555032, - // it should be written to the file from offset 8,388,608 to 1,6777,215. } private void ThrowIfDisposed() diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartDownloadCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartDownloadCommand.async.cs new file mode 100644 index 000000000000..23b423410bca --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartDownloadCommand.async.cs @@ -0,0 +1,124 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ * *****************************************************************************
+ *  __  _    _  ___
+ * ( )( \/\/ )/ __)
+ * /__\ \    /\__ \
+ * (_)(_)\/\/ (___/
+ *
+ * AWS SDK for .NET
+ * API Version: 2006-03-01
+ *
+ */
+using System;
+using System.Threading;
+using System.Threading.Tasks;
+using Amazon.S3.Model;
+
+namespace Amazon.S3.Transfer.Internal
+{
+    internal partial class MultipartDownloadCommand : BaseCommand
+    {
+        /// 
+        public override async Task<TransferUtilityDownloadResponse> ExecuteAsync(CancellationToken cancellationToken)
+        {
+            // Validate request parameters
+            ValidateRequest();
+
+            // Create configuration from request settings
+            var config = CreateConfiguration();
+
+            Logger.DebugFormat("MultipartDownloadCommand: Configuration - ConcurrentServiceRequests={0}, BufferSize={1}, TargetPartSize={2}",
+                config.ConcurrentServiceRequests,
+                config.BufferSize,
+                config.TargetPartSizeBytes
+            );
+
+            // Create data handler for writing parts to disk
+            using (var dataHandler = new FilePartDataHandler(config))
+            {
+                // Create coordinator to manage the download process
+                using (var coordinator = new MultipartDownloadManager(
+                    _s3Client,
+                    _request,
+                    config,
+                    dataHandler,
+                    RequestEventHandler))
+                {
+                    try
+                    {
+                        // Step 1: Discover download strategy (PART or RANGE) and get metadata
+                        Logger.DebugFormat("MultipartDownloadCommand: Discovering download strategy");
+                        var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(cancellationToken)
+                            .ConfigureAwait(false);
+
+                        Logger.DebugFormat("MultipartDownloadCommand: Discovered {0} part(s), total size: {1} bytes, IsSinglePart={2}",
+                            discoveryResult.TotalParts, discoveryResult.ObjectSize, discoveryResult.IsSinglePart);
+
+                        // Step 2: Start concurrent downloads for all parts
+                        Logger.DebugFormat("MultipartDownloadCommand: Starting downloads for {0} part(s)", discoveryResult.TotalParts);
+                        await coordinator.StartDownloadsAsync(discoveryResult, cancellationToken)
+                            .ConfigureAwait(false);
+
+                        // Step 2b: Wait for all downloads to complete before returning
+                        // This ensures file is fully written and committed for file-based downloads
+                        // For stream-based downloads, this task completes immediately (no-op)
+                        Logger.DebugFormat("MultipartDownloadCommand: Waiting for download completion");
+                        await coordinator.DownloadCompletionTask.ConfigureAwait(false);
+
+                        Logger.DebugFormat("MultipartDownloadCommand: Completed multipart download");
+
+                        // Step 3: Map the response from the initial GetObject response
+                        // The initial response contains all the metadata we need
+                        var mappedResponse = ResponseMapper.MapGetObjectResponse(discoveryResult.InitialResponse);
+
+                        // SEP Part GET Step 7 / Ranged GET Step 9:
+                        // Set ContentLength to total object size (not just first part)
+                        mappedResponse.Headers.ContentLength = discoveryResult.ObjectSize;
+
+                        // Set ContentRange to represent the entire object: bytes 0-(ContentLength-1)/ContentLength
+                        // S3 returns null for 0-byte objects, so we match that behavior
+                        if (discoveryResult.ObjectSize == 0)
+                        {
+                            mappedResponse.ContentRange = null;
+                        }
+                        else
+                        {
+                            mappedResponse.ContentRange = $"bytes 0-{discoveryResult.ObjectSize - 1}/{discoveryResult.ObjectSize}";
+                        }
+
+                        // SEP Part GET Step 7 / Ranged GET Step 9:
+                        // Handle composite checksums for multipart objects
+                        // Per spec: "If ChecksumType is COMPOSITE, set all checksum value members to null
+                        // as the checksum value returned from a part GET request is not the composite
+                        // checksum for the entire object"
+                        if (mappedResponse.ChecksumType == ChecksumType.COMPOSITE)
+                        {
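+                            // Illustrative: for a 20 MB object uploaded in 8 MB parts with CRC32,
+                            // S3 stores a composite value (a checksum computed over the per-part
+                            // checksums), so the CRC32 returned for part 1 alone cannot describe
+                            // the whole object.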
+                            mappedResponse.ChecksumCRC32 = null;
+                            mappedResponse.ChecksumCRC32C = null;
+                            mappedResponse.ChecksumCRC64NVME = null;
+                            mappedResponse.ChecksumSHA1 = null;
+                            mappedResponse.ChecksumSHA256 = null;
+                        }
+
+                        return mappedResponse;
+                    }
+                    catch (Exception ex)
+                    {
+                        Logger.Error(ex, "Exception during multipart download");
+                        throw;
+                    }
+                }
+            }
+        }
+    }
+}
diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/OpenStreamWithResponseCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/OpenStreamWithResponseCommand.async.cs
index 0432c10f8bf7..73e94061ccd3 100644
--- a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/OpenStreamWithResponseCommand.async.cs
+++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/OpenStreamWithResponseCommand.async.cs
@@ -61,7 +61,15 @@ public override async Task ExecuteAsync(Cance
             response.Headers.ContentLength = discoveryResult.ObjectSize;
 
             // Set ContentRange to represent the entire object: bytes 0-(ContentLength-1)/ContentLength
-            response.ContentRange = $"bytes 0-{discoveryResult.ObjectSize - 1}/{discoveryResult.ObjectSize}";
+            // S3 returns null for 0-byte objects, so we match that behavior
+            if (discoveryResult.ObjectSize == 0)
+            {
+                response.ContentRange = null;
+            }
+            else
+            {
+                response.ContentRange = $"bytes 0-{discoveryResult.ObjectSize - 1}/{discoveryResult.ObjectSize}";
+            }
 
             // SEP Part GET Step 7 / Ranged GET Step 9:
             // Handle composite checksums for multipart objects
diff --git a/sdk/src/Services/S3/Custom/Transfer/_async/ITransferUtility.async.cs b/sdk/src/Services/S3/Custom/Transfer/_async/ITransferUtility.async.cs
index 6e2942baba23..bb01d8094c9f 100644
--- a/sdk/src/Services/S3/Custom/Transfer/_async/ITransferUtility.async.cs
+++ b/sdk/src/Services/S3/Custom/Transfer/_async/ITransferUtility.async.cs
@@ -310,6 +310,13 @@ public partial interface ITransferUtility : IDisposable
         /// If the key is not specified in the request parameter,
         /// the file name will be used as the key name.
         /// 
+        /// 
+        /// Note: Consider using 
+        /// instead. The newer operation uses parallel downloads to improve performance
+        /// and returns response metadata.
+        /// 
+        /// 
         /// 
         /// Contains all the parameters required to download an Amazon S3 object.
         /// 
@@ -319,6 +326,112 @@ public partial interface ITransferUtility : IDisposable
         /// The task object representing the asynchronous operation.
         Task DownloadAsync(TransferUtilityDownloadRequest request, CancellationToken cancellationToken = default(CancellationToken));
 
+        /// 
+        /// Downloads the content from Amazon S3 and writes it to the specified file, returning response metadata.
+        /// 
+        /// 
+        /// 
+        /// This method uses parallel downloads to significantly improve throughput compared to
+        /// the standard method.
+        /// 
+        /// 
+        /// How it works:
+        /// 
+        /// 
+        /// For large objects, the download is automatically split into parts (default 8MB per part)
+        /// Multiple parts are downloaded concurrently using parallel requests to S3
+        /// Downloaded parts are written directly to the file as they arrive
+        /// 
+        /// 
+        /// Configuration Options:
+        /// 
+        /// 
+        /// You can customize the download behavior using :
+        /// 
+        /// 
+        /// var config = new TransferUtilityConfig
+        /// {
+        ///     // Control how many parts download in parallel (default: 10)
+        ///     ConcurrentServiceRequests = 20
+        /// };
+        /// var transferUtility = new TransferUtility(s3Client, config);
+        /// 
+        /// 
+        /// Use to control parallel download threads.
+        /// 
+        /// 
+        /// 
+        /// The file path where the downloaded content will be written.
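+        /// Missing directories in this path are created automatically; the download is
+        /// staged in a temporary ".s3tmp" sibling file and committed atomically on success.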
+        /// 
+        /// The name of the bucket containing the Amazon S3 object to download.
+        /// 
+        /// 
+        /// The key under which the Amazon S3 object is stored.
+        /// 
+        /// 
+        /// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
+        /// 
+        /// The task object representing the asynchronous operation with download response metadata.
+        Task<TransferUtilityDownloadResponse> DownloadWithResponseAsync(string filePath, string bucketName, string key, CancellationToken cancellationToken = default(CancellationToken));
+
+        /// 
+        /// Downloads the content from Amazon S3 based on the request and returns response metadata.
+        /// To track the progress of the download, add an event listener to the request's WriteObjectProgressEvent.
+        /// 
+        /// 
+        /// 
+        /// This method uses parallel downloads to significantly improve throughput compared to
+        /// the standard method.
+        /// 
+        /// 
+        /// How it works:
+        /// 
+        /// 
+        /// For large objects, the download is automatically split into parts (default 8MB per part)
+        /// Multiple parts are downloaded concurrently using parallel requests to S3
+        /// Downloaded parts are written directly to the file as they arrive
+        /// 
+        /// 
+        /// Configuration Options:
+        /// 
+        /// 
+        /// You can customize the download behavior using :
+        /// 
+        /// 
+        /// var config = new TransferUtilityConfig
+        /// {
+        ///     // Control how many parts download in parallel (default: 10)
+        ///     ConcurrentServiceRequests = 20
+        /// };
+        /// var transferUtility = new TransferUtility(s3Client, config);
+        /// 
+        /// 
+        /// Use to control parallel download threads.
+        /// 
+        /// 
+        /// You can also customize the part size per request using :
+        /// 
+        /// 
+        /// var request = new TransferUtilityDownloadRequest
+        /// {
+        ///     BucketName = "my-bucket",
+        ///     Key = "my-key",
+        ///     FilePath = "local-file.txt",
+        ///     PartSize = 16 * 1024 * 1024 // Use 16MB parts instead of default 8MB
+        /// };
+        /// var response = await transferUtility.DownloadWithResponseAsync(request);
+        /// 
+        /// 
+        /// 
+        /// Contains all the parameters required to download an Amazon S3 object.
+        /// 
+        /// 
+        /// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
+        /// 
+        /// The task object representing the asynchronous operation with download response metadata.
+        Task<TransferUtilityDownloadResponse> DownloadWithResponseAsync(TransferUtilityDownloadRequest request, CancellationToken cancellationToken = default(CancellationToken));
+
         #endregion
 
         #region OpenStream
diff --git a/sdk/src/Services/S3/Custom/Transfer/_async/TransferUtility.async.cs b/sdk/src/Services/S3/Custom/Transfer/_async/TransferUtility.async.cs
index 79899ce70535..c48a44494ea1 100644
--- a/sdk/src/Services/S3/Custom/Transfer/_async/TransferUtility.async.cs
+++ b/sdk/src/Services/S3/Custom/Transfer/_async/TransferUtility.async.cs
@@ -139,16 +139,7 @@ public partial class TransferUtility : ITransferUtility
             }
         }
 
-        /// 
-        /// Aborts the multipart uploads based on the specified request parameters.
-        /// 
-        /// 
-        /// Contains all the parameters required to abort multipart uploads.
-        /// 
-        /// 
-        /// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
-        /// 
-        /// The task object representing the asynchronous operation.
+        /// 
         public async Task AbortMultipartUploadsAsync(TransferUtilityAbortMultipartUploadRequest request, CancellationToken cancellationToken = default(CancellationToken))
         {
             using(CreateSpan(nameof(AbortMultipartUploadsAsync), null, Amazon.Runtime.Telemetry.Tracing.SpanKind.CLIENT))
@@ -173,6 +164,24 @@ public partial class TransferUtility : ITransferUtility
             }
         }
 
+        /// 
+        public async Task<TransferUtilityDownloadResponse> DownloadWithResponseAsync(string filePath, string bucketName, string key, CancellationToken cancellationToken = default(CancellationToken))
+        {
+            var request = ConstructDownloadRequest(filePath, bucketName, key);
+            return await DownloadWithResponseAsync(request, cancellationToken).ConfigureAwait(false);
+        }
+
+        /// 
+        public async Task<TransferUtilityDownloadResponse> DownloadWithResponseAsync(TransferUtilityDownloadRequest request, CancellationToken cancellationToken = default(CancellationToken))
+        {
+            using(CreateSpan(nameof(DownloadWithResponseAsync), null, Amazon.Runtime.Telemetry.Tracing.SpanKind.CLIENT))
+            {
+                CheckForBlockedArn(request.BucketName, "Download");
+                var command = new MultipartDownloadCommand(this._s3Client, request, this._config);
+                return await command.ExecuteAsync(cancellationToken).ConfigureAwait(false);
+            }
+        }
+
         #endregion
 
         #region OpenStream
diff --git a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.sync.cs b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.sync.cs
index 5a8a360ea00d..e8387a3ef2d9 100644
--- a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.sync.cs
+++ b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.sync.cs
@@ -393,6 +393,13 @@ public partial interface ITransferUtility
         /// 
         /// Downloads the content from Amazon S3 and writes it to the specified file.
         /// 
+        /// 
+        /// Note: Consider using 
+        /// instead. The newer operation uses parallel downloads to improve performance
+        /// and returns response metadata.
+        /// 
+        /// 
         /// 
         /// The file path where the content from Amazon S3 will be written to.
         /// 
@@ -409,10 +416,117 @@ public partial interface ITransferUtility
         /// If the key is not specified in the request parameter,
         /// the file name will be used as the key name.
         /// 
+ /// + /// + /// Note: Consider using + /// instead. The newer operation uses parallel downloads to improve performance + /// and returns response metadata. + /// + /// /// /// Contains all the parameters required to download an Amazon S3 object. /// void Download(TransferUtilityDownloadRequest request); + + /// + /// Downloads the content from Amazon S3 and writes it to the specified file, returning response metadata. + /// + /// + /// + /// This method uses parallel downloads to significantly improve throughput compared to + /// the standard method. + /// + /// + /// How it works: + /// + /// + /// For large objects, the download is automatically split into parts (default 8MB per part) + /// Multiple parts are downloaded concurrently using parallel requests to S3 + /// Downloaded parts are written directly to the file as they arrive + /// + /// + /// Configuration Options: + /// + /// + /// You can customize the download behavior using : + /// + /// + /// var config = new TransferUtilityConfig + /// { + /// // Control how many parts download in parallel (default: 10) + /// ConcurrentServiceRequests = 20 + /// }; + /// var transferUtility = new TransferUtility(s3Client, config); + /// + /// + /// Use to control parallel download threads. + /// + /// + /// + /// The file path where the downloaded content will be written. + /// + /// + /// The name of the bucket containing the Amazon S3 object to download. + /// + /// + /// The key under which the Amazon S3 object is stored. + /// + /// Response metadata including headers and version information from the download. + TransferUtilityDownloadResponse DownloadWithResponse(string filePath, string bucketName, string key); + + /// + /// Downloads the content from Amazon S3 based on the request and returns response metadata. + /// To track the progress of the download, add an event listener to the request's WriteObjectProgressEvent. + /// + /// + /// + /// This method uses parallel downloads to significantly improve throughput compared to + /// the standard method. + /// + /// + /// How it works: + /// + /// + /// For large objects, the download is automatically split into parts (default 8MB per part) + /// Multiple parts are downloaded concurrently using parallel requests to S3 + /// Downloaded parts are written directly to the file as they arrive + /// + /// + /// Configuration Options: + /// + /// + /// You can customize the download behavior using : + /// + /// + /// var config = new TransferUtilityConfig + /// { + /// // Control how many parts download in parallel (default: 10) + /// ConcurrentServiceRequests = 20 + /// }; + /// var transferUtility = new TransferUtility(s3Client, config); + /// + /// + /// Use to control parallel download threads. + /// + /// + /// You can also customize the part size per request using : + /// + /// + /// var request = new TransferUtilityDownloadRequest + /// { + /// BucketName = "my-bucket", + /// Key = "my-key", + /// FilePath = "local-file.txt", + /// PartSize = 16 * 1024 * 1024 // Use 16MB parts instead of default 8MB + /// }; + /// var response = transferUtility.DownloadWithResponse(request); + /// + /// + /// + /// Contains all the parameters required to download an Amazon S3 object. + /// + /// Response metadata including headers and version information from the download. 
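+        /// (This call blocks until the parallel download has fully completed.)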
+ TransferUtilityDownloadResponse DownloadWithResponse(TransferUtilityDownloadRequest request); #endregion #region DownloadDirectory diff --git a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.sync.cs b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.sync.cs index c37d98c2ed97..bc36fdc78e74 100644 --- a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.sync.cs +++ b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.sync.cs @@ -271,6 +271,34 @@ public void Download(TransferUtilityDownloadRequest request) ExceptionDispatchInfo.Capture(e.InnerException).Throw(); } } + + /// + public TransferUtilityDownloadResponse DownloadWithResponse(string filePath, string bucketName, string key) + { + try + { + return DownloadWithResponseAsync(filePath, bucketName, key).Result; + } + catch (AggregateException e) + { + ExceptionDispatchInfo.Capture(e.InnerException).Throw(); + return null; + } + } + + /// + public TransferUtilityDownloadResponse DownloadWithResponse(TransferUtilityDownloadRequest request) + { + try + { + return DownloadWithResponseAsync(request).Result; + } + catch (AggregateException e) + { + ExceptionDispatchInfo.Capture(e.InnerException).Throw(); + return null; + } + } #endregion #region DownloadDirectory diff --git a/sdk/src/Services/S3/Custom/Util/ContentRangeParser.cs b/sdk/src/Services/S3/Custom/Util/ContentRangeParser.cs new file mode 100644 index 000000000000..ba5aff959e4c --- /dev/null +++ b/sdk/src/Services/S3/Custom/Util/ContentRangeParser.cs @@ -0,0 +1,90 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ +using System; + +namespace Amazon.S3.Util +{ + /// + /// Utility for parsing S3 ContentRange headers. + /// Format: "bytes {start}-{end}/{total}" + /// Example: "bytes 0-5242879/52428800" + /// + internal static class ContentRangeParser + { + /// + /// Parses ContentRange header into its components. 
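+        /// For example, Parse("bytes 0-5242879/52428800") returns (0, 5242879, 52428800).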
+ /// + /// ContentRange header value (e.g., "bytes 0-1023/2048") + /// Tuple of (startByte, endByte, totalSize) + /// If ContentRange format is invalid + public static (long startByte, long endByte, long totalSize) Parse(string contentRange) + { + if (string.IsNullOrEmpty(contentRange)) + throw new InvalidOperationException("Content-Range header is missing"); + + // Format: "bytes {start}-{end}/{total-size}" + // Remove "bytes " prefix if present + var parts = contentRange.Replace("bytes ", "").Split('/'); + if (parts.Length != 2) + throw new InvalidOperationException($"Invalid ContentRange format: {contentRange}"); + + // Parse byte range (start-end) + var rangeParts = parts[0].Split('-'); + if (rangeParts.Length != 2 || + !long.TryParse(rangeParts[0], out var startByte) || + !long.TryParse(rangeParts[1], out var endByte)) + throw new InvalidOperationException($"Unable to parse ContentRange byte range: {contentRange}"); + + // Parse total size - S3 always returns exact sizes, never wildcards + if (parts[1] == "*") + throw new InvalidOperationException($"Unexpected wildcard in ContentRange total size: {contentRange}. S3 always returns exact object sizes."); + if (!long.TryParse(parts[1], out var totalSize)) + throw new InvalidOperationException($"Unable to parse ContentRange total size: {contentRange}"); + + return (startByte, endByte, totalSize); + } + + /// + /// Extracts just the start byte position from ContentRange. + /// + /// ContentRange header value + /// Start byte position + /// If ContentRange format is invalid + public static long GetStartByte(string contentRange) + { + var (startByte, _, _) = Parse(contentRange); + return startByte; + } + + /// + /// Extracts just the total size from ContentRange. + /// + /// ContentRange header value + /// Total object size in bytes + /// If ContentRange format is invalid + public static long GetTotalSize(string contentRange) + { + var (_, _, totalSize) = Parse(contentRange); + return totalSize; + } + } +} diff --git a/sdk/test/Services/S3/IntegrationTests/TransferUtilityDownloadWithResponseTests.cs b/sdk/test/Services/S3/IntegrationTests/TransferUtilityDownloadWithResponseTests.cs new file mode 100644 index 000000000000..db81e731129b --- /dev/null +++ b/sdk/test/Services/S3/IntegrationTests/TransferUtilityDownloadWithResponseTests.cs @@ -0,0 +1,692 @@ +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Threading.Tasks; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Amazon.S3; +using Amazon.S3.Model; +using Amazon.S3.Transfer; +using Amazon.S3.Util; +using Amazon.Util; +using AWSSDK_DotNet.IntegrationTests.Utils; + +namespace AWSSDK_DotNet.IntegrationTests.Tests.S3 +{ + /// + /// Integration tests for TransferUtility.DownloadWithResponseAsync functionality. + /// These tests verify end-to-end functionality with actual S3 operations and file I/O. + /// + /// Most test scenarios (buffer sizes, part boundaries, error handling) are covered + /// in unit tests with mocked dependencies for faster execution. 
+ /// + /// These integration tests focus on: + /// - Basic single-part downloads to files + /// - Basic multipart downloads to files + /// - Real S3 metadata preservation + /// - File handling (temp files, atomic writes, cleanup) + /// - Checksum validation + /// + [TestClass] + public class TransferUtilityDownloadWithResponseTests : TestBase + { + private static readonly long MB = 1024 * 1024; + private static string bucketName; + private static string tempDirectory; + + [ClassInitialize()] + public static void ClassInitialize(TestContext testContext) + { + bucketName = S3TestUtils.CreateBucketWithWait(Client); + tempDirectory = Path.Combine(Path.GetTempPath(), "S3DownloadTests-" + Guid.NewGuid().ToString()); + Directory.CreateDirectory(tempDirectory); + } + + [ClassCleanup] + public static void ClassCleanup() + { + AmazonS3Util.DeleteS3BucketWithObjects(Client, bucketName); + + // Clean up temp directory + if (Directory.Exists(tempDirectory)) + { + try + { + Directory.Delete(tempDirectory, recursive: true); + } + catch + { + // Best effort cleanup + } + } + + BaseClean(); + } + + [TestCleanup] + public void TestCleanup() + { + // Clean up any test files after each test + if (Directory.Exists(tempDirectory)) + { + foreach (var file in Directory.GetFiles(tempDirectory)) + { + try + { + File.Delete(file); + } + catch + { + // Best effort cleanup + } + } + } + } + + #region Single-Part Tests + + [TestMethod] + [TestCategory("S3")] + [TestCategory("Download")] + public async Task DownloadWithResponse_SinglePart_SmallObject() + { + // Arrange + var objectSize = 2 * MB; + var (key, expectedChecksum) = await CreateTestObjectWithChecksum(objectSize); + var downloadPath = Path.Combine(tempDirectory, key); + + // Act + var transferUtility = new TransferUtility(Client); + var response = await transferUtility.DownloadWithResponseAsync(downloadPath, bucketName, key); + + // Assert + Assert.IsNotNull(response, "Response should not be null"); + ValidateResponse(response, objectSize); + + // Verify file was written + Assert.IsTrue(File.Exists(downloadPath), "Downloaded file should exist"); + var downloadedChecksum = CalculateFileChecksum(downloadPath); + Assert.AreEqual(expectedChecksum, downloadedChecksum, "Downloaded data checksum should match"); + + var fileInfo = new FileInfo(downloadPath); + Assert.AreEqual(objectSize, fileInfo.Length, "Downloaded file size should match"); + + // Verify no temp files remain + VerifyNoTempFilesExist(downloadPath); + } + + [TestMethod] + [TestCategory("S3")] + [TestCategory("Download")] + public async Task DownloadWithResponse_SinglePart_EmptyObject() + { + // Arrange + var key = UtilityMethods.GenerateName("empty-object"); + await Client.PutObjectAsync(new PutObjectRequest + { + BucketName = bucketName, + Key = key, + ContentBody = "" + }); + var downloadPath = Path.Combine(tempDirectory, key); + + // Act + var transferUtility = new TransferUtility(Client); + var response = await transferUtility.DownloadWithResponseAsync(downloadPath, bucketName, key); + + // Assert + Assert.IsNotNull(response, "Response should not be null"); + Assert.AreEqual(0, response.Headers.ContentLength, "Content length should be 0"); + + // Verify ContentRange is null for 0-byte objects (matches S3 behavior) + Assert.IsNull(response.ContentRange, + "ContentRange should be null for 0-byte objects (matching S3 behavior)"); + + // Verify empty file was written + Assert.IsTrue(File.Exists(downloadPath), "Downloaded file should exist"); + var fileInfo = new FileInfo(downloadPath); + 
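+            // Even a 0-byte download is staged through the temp file and committed,
+            // so the destination file must exist with length 0.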
Assert.AreEqual(0, fileInfo.Length, "Downloaded file should be empty"); + + // Verify no temp files remain + VerifyNoTempFilesExist(downloadPath); + } + + #endregion + + #region Multipart Tests + + [TestMethod] + [TestCategory("S3")] + [TestCategory("Download")] + [TestCategory("Multipart")] + public async Task DownloadWithResponse_Multipart_BasicDownload() + { + // Arrange - Simple multipart download to verify end-to-end S3 integration + var objectSize = 20 * MB; + var partSize = 8 * MB; + var key = UtilityMethods.GenerateName("multipart-download-test"); + var uploadPath = Path.Combine(Path.GetTempPath(), key + "-upload"); + var downloadPath = Path.Combine(tempDirectory, key); + + UtilityMethods.GenerateFile(uploadPath, objectSize); + + // Calculate checksum before upload + var expectedChecksum = CalculateFileChecksum(uploadPath); + + // Upload using TransferUtility to ensure multipart upload + var uploadRequest = new TransferUtilityUploadRequest + { + BucketName = bucketName, + Key = key, + FilePath = uploadPath, + PartSize = partSize + }; + + var transferUtility = new TransferUtility(Client); + await transferUtility.UploadAsync(uploadRequest); + + // Verify object is multipart by checking PartsCount + var metadata = await Client.GetObjectMetadataAsync(new GetObjectMetadataRequest + { + BucketName = bucketName, + Key = key, + PartNumber = 1 + }); + Assert.IsTrue(metadata.PartsCount > 1, "Object should be multipart to test multipart download"); + + var downloadRequest = new TransferUtilityDownloadRequest + { + BucketName = bucketName, + Key = key, + FilePath = downloadPath, + PartSize = partSize + }; + + // Act + var response = await transferUtility.DownloadWithResponseAsync(downloadRequest); + + // Assert + Assert.IsNotNull(response, "Response should not be null"); + ValidateResponse(response, objectSize); + + // Verify file was written correctly + Assert.IsTrue(File.Exists(downloadPath), "Downloaded file should exist"); + var downloadedChecksum = CalculateFileChecksum(downloadPath); + Assert.AreEqual(expectedChecksum, downloadedChecksum, "Downloaded data checksum should match"); + + var fileInfo = new FileInfo(downloadPath); + Assert.AreEqual(objectSize, fileInfo.Length, "Downloaded file size should match"); + + // Verify no temp files remain + VerifyNoTempFilesExist(downloadPath); + + // Cleanup upload file + File.Delete(uploadPath); + } + + [TestMethod] + [TestCategory("S3")] + [TestCategory("Download")] + public async Task DownloadWithResponse_RangeStrategy_SmallSinglePartObject() + { + // Arrange - Small object that fits in single part, but using RANGE strategy + // This tests that ContentRange header is present even for single-part downloads + // when using RANGE strategy (S3 includes ContentRange when Range header is sent) + var objectSize = 2 * MB; // Less than default 8MB part size + var key = UtilityMethods.GenerateName("range-single-part-test"); + var uploadPath = Path.Combine(Path.GetTempPath(), key + "-upload"); + var downloadPath = Path.Combine(tempDirectory, key); + + UtilityMethods.GenerateFile(uploadPath, objectSize); + + // Calculate checksum before upload + var expectedChecksum = CalculateFileChecksum(uploadPath); + + await Client.PutObjectAsync(new PutObjectRequest + { + BucketName = bucketName, + Key = key, + FilePath = uploadPath + }); + + // Act - Download with RANGE strategy even though only 1 part needed + var downloadRequest = new TransferUtilityDownloadRequest + { + BucketName = bucketName, + Key = key, + FilePath = downloadPath, + MultipartDownloadType = 
MultipartDownloadType.RANGE, + PartSize = 8 * MB // Larger than file, so only 1 part needed + }; + + var transferUtility = new TransferUtility(Client); + var response = await transferUtility.DownloadWithResponseAsync(downloadRequest); + + // Assert + Assert.IsNotNull(response, "Response should not be null"); + ValidateResponse(response, objectSize); + + // Verify ContentRange is present (because RANGE strategy uses Range headers) + Assert.IsNotNull(response.ContentRange, + "ContentRange should be present when using RANGE strategy, even for single-part downloads"); + Assert.IsTrue(response.ContentRange.StartsWith("bytes "), + "ContentRange should have correct format"); + + // Verify file was written correctly + Assert.IsTrue(File.Exists(downloadPath), "Downloaded file should exist"); + var downloadedChecksum = CalculateFileChecksum(downloadPath); + Assert.AreEqual(expectedChecksum, downloadedChecksum, + "Downloaded data checksum should match (RANGE strategy, single part)"); + + var fileInfo = new FileInfo(downloadPath); + Assert.AreEqual(objectSize, fileInfo.Length, + "Downloaded file size should match (RANGE strategy, single part)"); + + // Verify no temp files remain + VerifyNoTempFilesExist(downloadPath); + + // Cleanup upload file + File.Delete(uploadPath); + } + + [TestMethod] + [TestCategory("S3")] + [TestCategory("Download")] + [TestCategory("Multipart")] + public async Task DownloadWithResponse_Multipart_RangeDownload() + { + // Arrange - Test RANGE-based multipart download with custom part size + var objectSize = 20 * MB; + var uploadPartSize = 8 * MB; // Upload with 8MB parts + var downloadPartSize = 6 * MB; // Download with different 6MB parts + var key = UtilityMethods.GenerateName("multipart-range-test"); + var uploadPath = Path.Combine(Path.GetTempPath(), key + "-upload"); + var downloadPath = Path.Combine(tempDirectory, key); + + UtilityMethods.GenerateFile(uploadPath, objectSize); + + // Calculate checksum before upload + var expectedChecksum = CalculateFileChecksum(uploadPath); + + // Upload using TransferUtility to ensure multipart upload + var uploadRequest = new TransferUtilityUploadRequest + { + BucketName = bucketName, + Key = key, + FilePath = uploadPath, + PartSize = uploadPartSize + }; + + var transferUtility = new TransferUtility(Client); + await transferUtility.UploadAsync(uploadRequest); + + // Verify object is multipart + var metadata = await Client.GetObjectMetadataAsync(new GetObjectMetadataRequest + { + BucketName = bucketName, + Key = key, + PartNumber = 1 + }); + Assert.IsTrue(metadata.PartsCount > 1, "Object should be multipart to test multipart download"); + + // Act - Download using RANGE strategy with different part size + var downloadRequest = new TransferUtilityDownloadRequest + { + BucketName = bucketName, + Key = key, + FilePath = downloadPath, + MultipartDownloadType = MultipartDownloadType.RANGE, + PartSize = downloadPartSize + }; + + var response = await transferUtility.DownloadWithResponseAsync(downloadRequest); + + // Assert + Assert.IsNotNull(response, "Response should not be null"); + ValidateResponse(response, objectSize); + + // Verify file was written correctly + Assert.IsTrue(File.Exists(downloadPath), "Downloaded file should exist"); + var downloadedChecksum = CalculateFileChecksum(downloadPath); + Assert.AreEqual(expectedChecksum, downloadedChecksum, + "Downloaded data checksum should match (RANGE strategy)"); + + var fileInfo = new FileInfo(downloadPath); + Assert.AreEqual(objectSize, fileInfo.Length, + "Downloaded file size should 
match (RANGE strategy)"); + + // Verify no temp files remain + VerifyNoTempFilesExist(downloadPath); + + // Cleanup upload file + File.Delete(uploadPath); + } + + #endregion + + #region Checksum Tests + + [TestMethod] + [TestCategory("S3")] + [TestCategory("Download")] + [TestCategory("Checksum")] + public async Task DownloadWithResponse_MultipartObjectWithChecksums_NullsCompositeChecksums() + { + // Arrange - Upload a multipart object with checksums + var objectSize = 20 * MB; + var key = UtilityMethods.GenerateName("composite-checksum-test"); + var uploadPath = Path.Combine(Path.GetTempPath(), key + "-upload"); + var downloadPath = Path.Combine(tempDirectory, key); + + UtilityMethods.GenerateFile(uploadPath, objectSize); + + // Upload with checksum algorithm to create composite checksum + var uploadRequest = new TransferUtilityUploadRequest + { + BucketName = bucketName, + Key = key, + FilePath = uploadPath, + ChecksumAlgorithm = ChecksumAlgorithm.CRC32, + PartSize = 8 * MB + }; + + var uploadUtility = new TransferUtility(Client); + await uploadUtility.UploadAsync(uploadRequest); + + // Verify object is multipart by checking PartsCount + var metadata = await Client.GetObjectMetadataAsync(new GetObjectMetadataRequest + { + BucketName = bucketName, + Key = key, + PartNumber = 1 + }); + Assert.IsTrue(metadata.PartsCount > 1, "Object should be multipart to test composite checksums"); + + // Act - Download with ChecksumMode enabled + var downloadRequest = new TransferUtilityDownloadRequest + { + BucketName = bucketName, + Key = key, + FilePath = downloadPath, + ChecksumMode = ChecksumMode.ENABLED + }; + + var response = await uploadUtility.DownloadWithResponseAsync(downloadRequest); + + // Assert - Verify ChecksumType is COMPOSITE + Assert.AreEqual(ChecksumType.COMPOSITE, response.ChecksumType, + "ChecksumType should be COMPOSITE for multipart objects"); + + // Per spec: "If ChecksumType is COMPOSITE, set all checksum value members to null + // as the checksum value returned from a part GET request is not the composite + // checksum for the entire object" + Assert.IsNull(response.ChecksumCRC32, "ChecksumCRC32 should be null for composite checksums"); + Assert.IsNull(response.ChecksumCRC32C, "ChecksumCRC32C should be null for composite checksums"); + Assert.IsNull(response.ChecksumCRC64NVME, "ChecksumCRC64NVME should be null for composite checksums"); + Assert.IsNull(response.ChecksumSHA1, "ChecksumSHA1 should be null for composite checksums"); + Assert.IsNull(response.ChecksumSHA256, "ChecksumSHA256 should be null for composite checksums"); + + // Verify other response properties are still populated correctly + Assert.IsNotNull(response.ETag, "ETag should still be populated"); + Assert.IsTrue(response.Headers.ContentLength > 0, "ContentLength should be populated"); + + // Verify file was written correctly + Assert.IsTrue(File.Exists(downloadPath), "Downloaded file should exist"); + var fileInfo = new FileInfo(downloadPath); + Assert.AreEqual(objectSize, fileInfo.Length, "Downloaded file size should match"); + + // Verify no temp files remain + VerifyNoTempFilesExist(downloadPath); + + // Cleanup upload file + File.Delete(uploadPath); + } + + #endregion + + #region Metadata Validation Tests + + [TestMethod] + [TestCategory("S3")] + [TestCategory("Download")] + [TestCategory("Metadata")] + public async Task DownloadWithResponse_PreservesMetadata() + { + // Arrange + var objectSize = 10 * MB; + var key = UtilityMethods.GenerateName("metadata-test"); + var uploadPath = 
Path.Combine(Path.GetTempPath(), key + "-upload"); + var downloadPath = Path.Combine(tempDirectory, key); + + UtilityMethods.GenerateFile(uploadPath, objectSize); + + var putRequest = new PutObjectRequest + { + BucketName = bucketName, + Key = key, + FilePath = uploadPath, + ContentType = "application/octet-stream" + }; + putRequest.Metadata.Add("test-key", "test-value"); + putRequest.Metadata.Add("custom-header", "custom-value"); + + await Client.PutObjectAsync(putRequest); + + // Act + var transferUtility = new TransferUtility(Client); + var response = await transferUtility.DownloadWithResponseAsync(downloadPath, bucketName, key); + + // Assert + Assert.IsNotNull(response, "Response should not be null"); + Assert.AreEqual("application/octet-stream", response.Headers.ContentType); + + // S3 automatically prefixes user-defined metadata with "x-amz-meta-" + Assert.IsTrue(response.Metadata.Keys.Contains("x-amz-meta-test-key"), + "Metadata should contain 'x-amz-meta-test-key'"); + Assert.AreEqual("test-value", response.Metadata["x-amz-meta-test-key"]); + + Assert.IsTrue(response.Metadata.Keys.Contains("x-amz-meta-custom-header"), + "Metadata should contain 'x-amz-meta-custom-header'"); + Assert.AreEqual("custom-value", response.Metadata["x-amz-meta-custom-header"]); + + // Verify file was written + Assert.IsTrue(File.Exists(downloadPath), "Downloaded file should exist"); + + // Verify no temp files remain + VerifyNoTempFilesExist(downloadPath); + + // Cleanup upload file + File.Delete(uploadPath); + } + + [TestMethod] + [TestCategory("S3")] + [TestCategory("Download")] + [TestCategory("Metadata")] + public async Task DownloadWithResponse_PreservesETag() + { + // Arrange + var objectSize = 15 * MB; + var key = UtilityMethods.GenerateName("etag-test"); + var uploadPath = Path.Combine(Path.GetTempPath(), key + "-upload"); + var downloadPath = Path.Combine(tempDirectory, key); + + UtilityMethods.GenerateFile(uploadPath, objectSize); + + await Client.PutObjectAsync(new PutObjectRequest + { + BucketName = bucketName, + Key = key, + FilePath = uploadPath + }); + + var metadata = await Client.GetObjectMetadataAsync(new GetObjectMetadataRequest + { + BucketName = bucketName, + Key = key + }); + var expectedETag = metadata.ETag; + + // Act + var transferUtility = new TransferUtility(Client); + var response = await transferUtility.DownloadWithResponseAsync(downloadPath, bucketName, key); + + // Assert + Assert.IsNotNull(response.ETag, "ETag should not be null"); + Assert.AreEqual(expectedETag, response.ETag, "ETag should match"); + + // Verify file was written + Assert.IsTrue(File.Exists(downloadPath), "Downloaded file should exist"); + + // Verify no temp files remain + VerifyNoTempFilesExist(downloadPath); + + // Cleanup upload file + File.Delete(uploadPath); + } + + #endregion + + #region File Handling Tests + + [TestMethod] + [TestCategory("S3")] + [TestCategory("Download")] + [TestCategory("FileHandling")] + public async Task DownloadWithResponse_CreatesDirectoryIfNeeded() + { + // Arrange + var objectSize = 5 * MB; + var (key, expectedChecksum) = await CreateTestObjectWithChecksum(objectSize); + + // Create a nested directory path that doesn't exist + var nestedDir = Path.Combine(tempDirectory, "level1", "level2", "level3"); + var downloadPath = Path.Combine(nestedDir, key); + + Assert.IsFalse(Directory.Exists(nestedDir), "Nested directory should not exist initially"); + + // Act + var transferUtility = new TransferUtility(Client); + var response = await 
transferUtility.DownloadWithResponseAsync(downloadPath, bucketName, key);
+
+            // Assert
+            Assert.IsNotNull(response, "Response should not be null");
+            Assert.IsTrue(Directory.Exists(nestedDir), "Nested directory should be created");
+            Assert.IsTrue(File.Exists(downloadPath), "Downloaded file should exist in nested directory");
+
+            var downloadedChecksum = CalculateFileChecksum(downloadPath);
+            Assert.AreEqual(expectedChecksum, downloadedChecksum, "Downloaded data checksum should match");
+
+            // Verify no temp files remain
+            VerifyNoTempFilesExist(downloadPath);
+        }
+
+        [TestMethod]
+        [TestCategory("S3")]
+        [TestCategory("Download")]
+        [TestCategory("FileHandling")]
+        public async Task DownloadWithResponse_OverwritesExistingFile()
+        {
+            // Arrange
+            var objectSize = 5 * MB;
+            var (key, expectedChecksum) = await CreateTestObjectWithChecksum(objectSize);
+            var downloadPath = Path.Combine(tempDirectory, key);
+
+            // Create an existing file with different content
+            var existingContent = new byte[1024];
+            new Random().NextBytes(existingContent);
+            File.WriteAllBytes(downloadPath, existingContent);
+
+            var existingChecksum = CalculateFileChecksum(downloadPath);
+            Assert.AreNotEqual(expectedChecksum, existingChecksum, "Existing file should have different content");
+
+            // Act
+            var transferUtility = new TransferUtility(Client);
+            var response = await transferUtility.DownloadWithResponseAsync(downloadPath, bucketName, key);
+
+            // Assert
+            Assert.IsNotNull(response, "Response should not be null");
+            Assert.IsTrue(File.Exists(downloadPath), "Downloaded file should exist");
+
+            var downloadedChecksum = CalculateFileChecksum(downloadPath);
+            Assert.AreEqual(expectedChecksum, downloadedChecksum, "Downloaded file should have new content");
+
+            var fileInfo = new FileInfo(downloadPath);
+            Assert.AreEqual(objectSize, fileInfo.Length, "Downloaded file size should match new content");
+
+            // Verify no temp files remain
+            VerifyNoTempFilesExist(downloadPath);
+        }
+
+        #endregion
+
+        #region Helper Methods
+
+        /// <summary>
+        /// Creates a test object in S3 with the specified size and returns its key and checksum.
+        /// </summary>
+        private static async Task<(string key, string checksum)> CreateTestObjectWithChecksum(long objectSize)
+        {
+            var key = UtilityMethods.GenerateName("download-test");
+            var filePath = Path.Combine(Path.GetTempPath(), key);
+            UtilityMethods.GenerateFile(filePath, objectSize);
+
+            // Calculate checksum before upload
+            var checksum = CalculateFileChecksum(filePath);
+
+            await Client.PutObjectAsync(new PutObjectRequest
+            {
+                BucketName = bucketName,
+                Key = key,
+                FilePath = filePath
+            });
+
+            // Cleanup temp upload file
+            File.Delete(filePath);
+
+            return (key, checksum);
+        }
+
+        /// <summary>
+        /// Calculates the MD5 checksum of a file.
+        /// </summary>
+        private static string CalculateFileChecksum(string filePath)
+        {
+            using (var md5 = System.Security.Cryptography.MD5.Create())
+            using (var stream = File.OpenRead(filePath))
+            {
+                var hash = md5.ComputeHash(stream);
+                return Convert.ToBase64String(hash);
+            }
+        }
+
+        /// <summary>
+        /// Validates that the response contains expected values.
+        /// </summary>
+        private static void ValidateResponse(TransferUtilityDownloadResponse response, long expectedSize)
+        {
+            Assert.IsNotNull(response.Headers, "Headers should not be null");
+            Assert.AreEqual(expectedSize, response.Headers.ContentLength, "Content length should match");
+            Assert.IsNotNull(response.ETag, "ETag should not be null");
+        }
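+
+        // The glob in VerifyNoTempFilesExist below relies on the temp-file naming convention
+        // {originalPath}.s3tmp.{8-char-id}. As a hedged illustration only (this helper is
+        // hypothetical and is not part of the SDK or of this test suite), a stricter matcher
+        // for that convention could look like the following; the 8-char id is expected to come
+        // from the RFC 4648 base32 alphabet (A-Z, 2-7), per the AtomicFileHandler tests.
+        private static bool LooksLikeTempFileSketch(string candidatePath, string originalPath)
+        {
+            return System.Text.RegularExpressions.Regex.IsMatch(
+                candidatePath,
+                "^" + System.Text.RegularExpressions.Regex.Escape(originalPath) + @"\.s3tmp\.[A-Z2-7]{8}$");
+        }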
+
+        /// <summary>
+        /// Verifies that no temporary files remain after download completion.
+        /// Temp files use the pattern: {originalPath}.s3tmp.{8-char-id}
+        /// </summary>
+        private static void VerifyNoTempFilesExist(string filePath)
+        {
+            var directory = Path.GetDirectoryName(filePath);
+            var fileName = Path.GetFileName(filePath);
+
+            if (Directory.Exists(directory))
+            {
+                var tempFiles = Directory.GetFiles(directory, fileName + ".s3tmp.*");
+                Assert.AreEqual(0, tempFiles.Length,
+                    $"No temporary files should remain. Found: {string.Join(", ", tempFiles)}");
+            }
+        }
+
+        #endregion
+    }
+}
diff --git a/sdk/test/Services/S3/UnitTests/Custom/AtomicFileHandlerTests.cs b/sdk/test/Services/S3/UnitTests/Custom/AtomicFileHandlerTests.cs
new file mode 100644
index 000000000000..89ce344513a6
--- /dev/null
+++ b/sdk/test/Services/S3/UnitTests/Custom/AtomicFileHandlerTests.cs
@@ -0,0 +1,670 @@
+using Amazon.S3.Transfer.Internal;
+using Microsoft.VisualStudio.TestTools.UnitTesting;
+using System;
+using System.IO;
+using System.Linq;
+using System.Text.RegularExpressions;
+
+namespace AWSSDK.UnitTests
+{
+    [TestClass]
+    public class AtomicFileHandlerTests
+    {
+        private string _testDirectory;
+
+        [TestInitialize]
+        public void Setup()
+        {
+            _testDirectory = MultipartDownloadTestHelpers.CreateTempDirectory();
+        }
+
+        [TestCleanup]
+        public void Cleanup()
+        {
+            MultipartDownloadTestHelpers.CleanupTempDirectory(_testDirectory);
+        }
+
+        #region Constructor Tests
+
+        [TestMethod]
+        public void Constructor_CreatesHandler()
+        {
+            // Act
+            var handler = new AtomicFileHandler();
+
+            // Assert
+            Assert.IsNotNull(handler);
+        }
+
+        #endregion
+
+        #region CreateTemporaryFile Tests
+
+        [TestMethod]
+        public void CreateTemporaryFile_CreatesFileWithS3TmpPattern()
+        {
+            // Arrange
+            var handler = new AtomicFileHandler();
+            var destinationPath = Path.Combine(_testDirectory, "test.dat");
+
+            // Act
+            var tempPath = handler.CreateTemporaryFile(destinationPath);
+
+            // Assert
+            Assert.IsTrue(tempPath.Contains(".s3tmp."));
+            Assert.IsTrue(File.Exists(tempPath));
+        }
+
+        [TestMethod]
+        public void CreateTemporaryFile_Generates8CharacterUniqueId()
+        {
+            // Arrange
+            var handler = new AtomicFileHandler();
+            var destinationPath = Path.Combine(_testDirectory, "test.dat");
+
+            // Act
+            var tempPath = handler.CreateTemporaryFile(destinationPath);
+
+            // Assert - Extract unique ID from pattern: {dest}.s3tmp.{8-char-id}
+            var match = Regex.Match(tempPath, @"\.s3tmp\.([A-Z2-7]{8})$");
+            Assert.IsTrue(match.Success, $"Temp file path '{tempPath}' doesn't match expected pattern");
+            Assert.AreEqual(8, match.Groups[1].Value.Length);
+        }
+
+        [TestMethod]
+        public void CreateTemporaryFile_CreatesDirectoryIfDoesntExist()
+        {
+            // Arrange
+            var handler = new AtomicFileHandler();
+            var nestedDir = Path.Combine(_testDirectory, "level1", "level2", "level3");
+            var destinationPath = Path.Combine(nestedDir, "test.dat");
+
+            // Act
+            var tempPath = handler.CreateTemporaryFile(destinationPath);
+
+            // Assert
+            Assert.IsTrue(Directory.Exists(nestedDir));
+            Assert.IsTrue(File.Exists(tempPath));
+        }
+
+        [TestMethod]
+        public void CreateTemporaryFile_GeneratesUniqueNamesOnCollision()
+        {
+            // Arrange
+            var handler1 = new AtomicFileHandler();
+            var handler2 = new AtomicFileHandler();
+            var destinationPath = Path.Combine(_testDirectory, "test.dat");
+
+            // Act
+            var tempPath1 = handler1.CreateTemporaryFile(destinationPath);
+            var tempPath2 = handler2.CreateTemporaryFile(destinationPath);
+
+            // Assert
+            Assert.AreNotEqual(tempPath1, tempPath2);
+            Assert.IsTrue(File.Exists(tempPath1));
+            Assert.IsTrue(File.Exists(tempPath2));
+        }
+
+        [TestMethod]
public void CreateTemporaryFile_CreatesEmptyFile() + { + // Arrange + var handler = new AtomicFileHandler(); + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + + // Act + var tempPath = handler.CreateTemporaryFile(destinationPath); + + // Assert + var fileInfo = new FileInfo(tempPath); + Assert.AreEqual(0, fileInfo.Length); + } + + [TestMethod] + public void CreateTemporaryFile_ReturnsCorrectTempFilePath() + { + // Arrange + var handler = new AtomicFileHandler(); + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + + // Act + var tempPath = handler.CreateTemporaryFile(destinationPath); + + // Assert + Assert.IsTrue(tempPath.StartsWith(destinationPath)); + Assert.IsTrue(tempPath.Contains(".s3tmp.")); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentException))] + public void CreateTemporaryFile_WithNullDestinationPath_ThrowsArgumentException() + { + // Arrange + var handler = new AtomicFileHandler(); + + // Act + handler.CreateTemporaryFile(null); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentException))] + public void CreateTemporaryFile_WithEmptyDestinationPath_ThrowsArgumentException() + { + // Arrange + var handler = new AtomicFileHandler(); + + // Act + handler.CreateTemporaryFile(""); + } + + #endregion + + #region CreateTemporaryFile Tests - Path Handling + + [TestMethod] + public void CreateTemporaryFile_WithAbsolutePath_CreatesFile() + { + // Arrange + var handler = new AtomicFileHandler(); + var absolutePath = Path.Combine(_testDirectory, "absolute.dat"); + + // Act + var tempPath = handler.CreateTemporaryFile(absolutePath); + + // Assert + Assert.IsTrue(Path.IsPathRooted(tempPath)); + Assert.IsTrue(File.Exists(tempPath)); + } + + [TestMethod] + public void CreateTemporaryFile_WithRelativePath_CreatesFile() + { + // Arrange + var handler = new AtomicFileHandler(); + var relativePath = "relative.dat"; + + // Act + var tempPath = handler.CreateTemporaryFile(relativePath); + + // Assert + Assert.IsTrue(File.Exists(tempPath)); + } + + [TestMethod] + public void CreateTemporaryFile_WithSpecialCharactersInPath_CreatesFile() + { + // Arrange + var handler = new AtomicFileHandler(); + var specialPath = Path.Combine(_testDirectory, "test[1]@2024.dat"); + + // Act + var tempPath = handler.CreateTemporaryFile(specialPath); + + // Assert + Assert.IsTrue(File.Exists(tempPath)); + } + + [TestMethod] + public void CreateTemporaryFile_WithDeepDirectoryStructure_CreatesAllNestedDirectories() + { + // Arrange + var handler = new AtomicFileHandler(); + var deepPath = Path.Combine(_testDirectory, "a", "b", "c", "d", "e", "test.dat"); + + // Act + var tempPath = handler.CreateTemporaryFile(deepPath); + + // Assert + Assert.IsTrue(Directory.Exists(Path.GetDirectoryName(deepPath))); + Assert.IsTrue(File.Exists(tempPath)); + } + + #endregion + + #region CommitFile Tests + + [TestMethod] + public void CommitFile_MovesTempFileToDestination() + { + // Arrange + var handler = new AtomicFileHandler(); + var destinationPath = Path.Combine(_testDirectory, "final.dat"); + var tempPath = handler.CreateTemporaryFile(destinationPath); + + // Write some data to temp file + File.WriteAllText(tempPath, "test content"); + + // Act + handler.CommitFile(tempPath, destinationPath); + + // Assert + Assert.IsTrue(File.Exists(destinationPath)); + Assert.IsFalse(File.Exists(tempPath)); + Assert.AreEqual("test content", File.ReadAllText(destinationPath)); + } + + [TestMethod] + public void CommitFile_OverwritesExistingDestination() + { + // Arrange + var handler = new 
AtomicFileHandler(); + var destinationPath = Path.Combine(_testDirectory, "existing.dat"); + + // Create existing file + File.WriteAllText(destinationPath, "old content"); + + var tempPath = handler.CreateTemporaryFile(destinationPath); + File.WriteAllText(tempPath, "new content"); + + // Act + handler.CommitFile(tempPath, destinationPath); + + // Assert + Assert.IsTrue(File.Exists(destinationPath)); + Assert.AreEqual("new content", File.ReadAllText(destinationPath)); + } + + [TestMethod] + public void CommitFile_ClearsInternalTempFilePath() + { + // Arrange + var handler = new AtomicFileHandler(); + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var tempPath = handler.CreateTemporaryFile(destinationPath); + File.WriteAllText(tempPath, "content"); + + // Act + handler.CommitFile(tempPath, destinationPath); + handler.Dispose(); // Should not try to cleanup already-committed file + + // Assert + Assert.IsTrue(File.Exists(destinationPath)); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentException))] + public void CommitFile_WithNullTempPath_ThrowsArgumentException() + { + // Arrange + var handler = new AtomicFileHandler(); + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + + // Act + handler.CommitFile(null, destinationPath); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentException))] + public void CommitFile_WithNullDestinationPath_ThrowsArgumentException() + { + // Arrange + var handler = new AtomicFileHandler(); + var tempPath = Path.Combine(_testDirectory, "temp.dat"); + File.Create(tempPath).Dispose(); + + // Act + handler.CommitFile(tempPath, null); + } + + [TestMethod] + [ExpectedException(typeof(FileNotFoundException))] + public void CommitFile_WithMissingTempFile_ThrowsFileNotFoundException() + { + // Arrange + var handler = new AtomicFileHandler(); + var tempPath = Path.Combine(_testDirectory, "nonexistent.s3tmp.ABCD1234"); + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + + // Act + handler.CommitFile(tempPath, destinationPath); + } + + [TestMethod] + public void CommitFile_ToSameDirectory_Succeeds() + { + // Arrange + var handler = new AtomicFileHandler(); + var destinationPath = Path.Combine(_testDirectory, "file.dat"); + var tempPath = handler.CreateTemporaryFile(destinationPath); + File.WriteAllText(tempPath, "content"); + + // Act + handler.CommitFile(tempPath, destinationPath); + + // Assert + Assert.IsTrue(File.Exists(destinationPath)); + Assert.IsFalse(File.Exists(tempPath)); + } + + [TestMethod] + public void CommitFile_ToDifferentDirectory_Succeeds() + { + // Arrange + var handler = new AtomicFileHandler(); + var tempDir = Path.Combine(_testDirectory, "temp"); + var finalDir = Path.Combine(_testDirectory, "final"); + Directory.CreateDirectory(tempDir); + Directory.CreateDirectory(finalDir); + + var tempPath = Path.Combine(tempDir, "file.s3tmp.ABCD1234"); + var destinationPath = Path.Combine(finalDir, "file.dat"); + File.WriteAllText(tempPath, "content"); + + // Act + handler.CommitFile(tempPath, destinationPath); + + // Assert + Assert.IsTrue(File.Exists(destinationPath)); + Assert.IsFalse(File.Exists(tempPath)); + } + + #endregion + + #region CleanupOnFailure Tests + + [TestMethod] + public void CleanupOnFailure_DeletesTempFile() + { + // Arrange + var handler = new AtomicFileHandler(); + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var tempPath = handler.CreateTemporaryFile(destinationPath); + + // Act + handler.CleanupOnFailure(); + + // Assert + 
Assert.IsFalse(File.Exists(tempPath)); + } + + [TestMethod] + public void CleanupOnFailure_WithExplicitPath_DeletesSpecifiedFile() + { + // Arrange + var handler = new AtomicFileHandler(); + var tempPath = Path.Combine(_testDirectory, "explicit.s3tmp.ABCD1234"); + File.Create(tempPath).Dispose(); + + // Act + handler.CleanupOnFailure(tempPath); + + // Assert + Assert.IsFalse(File.Exists(tempPath)); + } + + [TestMethod] + public void CleanupOnFailure_WithMissingFile_DoesNotThrow() + { + // Arrange + var handler = new AtomicFileHandler(); + + // Act & Assert - Should not throw + handler.CleanupOnFailure(); + } + + [TestMethod] + public void CleanupOnFailure_ClearsInternalTempFilePath() + { + // Arrange + var handler = new AtomicFileHandler(); + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + handler.CreateTemporaryFile(destinationPath); + + // Act + handler.CleanupOnFailure(); + handler.CleanupOnFailure(); // Second call should be safe + + // Assert - No exception thrown + } + + [TestMethod] + public void CleanupOnFailure_CanBeCalledMultipleTimes() + { + // Arrange + var handler = new AtomicFileHandler(); + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + handler.CreateTemporaryFile(destinationPath); + + // Act & Assert - Should not throw + handler.CleanupOnFailure(); + handler.CleanupOnFailure(); + handler.CleanupOnFailure(); + } + + [TestMethod] + public void CleanupOnFailure_WithNullPath_UsesInternalPath() + { + // Arrange + var handler = new AtomicFileHandler(); + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var tempPath = handler.CreateTemporaryFile(destinationPath); + + // Act + handler.CleanupOnFailure(null); + + // Assert + Assert.IsFalse(File.Exists(tempPath)); + } + + [TestMethod] + public void CleanupOnFailure_WithEmptyPath_UsesInternalPath() + { + // Arrange + var handler = new AtomicFileHandler(); + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var tempPath = handler.CreateTemporaryFile(destinationPath); + + // Act + handler.CleanupOnFailure(""); + + // Assert + Assert.IsFalse(File.Exists(tempPath)); + } + + #endregion + + #region GenerateRandomId Tests + + [TestMethod] + public void GenerateRandomId_GeneratesDifferentIdsOnSuccessiveCalls() + { + // Arrange + var handler = new AtomicFileHandler(); + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + + // Act - Create multiple temp files to generate multiple IDs + var tempPath1 = handler.CreateTemporaryFile(destinationPath); + var handler2 = new AtomicFileHandler(); + var tempPath2 = handler2.CreateTemporaryFile(destinationPath); + var handler3 = new AtomicFileHandler(); + var tempPath3 = handler3.CreateTemporaryFile(destinationPath); + + // Extract IDs + var id1 = Regex.Match(tempPath1, @"\.s3tmp\.([A-Z2-7]{8})$").Groups[1].Value; + var id2 = Regex.Match(tempPath2, @"\.s3tmp\.([A-Z2-7]{8})$").Groups[1].Value; + var id3 = Regex.Match(tempPath3, @"\.s3tmp\.([A-Z2-7]{8})$").Groups[1].Value; + + // Assert - IDs should be different (statistically) + Assert.IsFalse(id1 == id2 && id2 == id3, "All three IDs are identical - very unlikely with proper random generation"); + } + + [TestMethod] + public void GenerateRandomId_UsesBase32CharacterSet() + { + // Arrange + var handler = new AtomicFileHandler(); + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + + // Act + var tempPath = handler.CreateTemporaryFile(destinationPath); + var match = Regex.Match(tempPath, @"\.s3tmp\.([A-Z2-7]{8})$"); + var id = match.Groups[1].Value; + + // 
Assert - Should only contain A-Z and 2-7 (RFC 4648 base32) + Assert.IsTrue(Regex.IsMatch(id, "^[A-Z2-7]+$"), $"ID '{id}' contains invalid base32 characters"); + } + + [TestMethod] + public void GenerateRandomId_IdsAreFilesystemSafe() + { + // Arrange + var handler = new AtomicFileHandler(); + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + + // Act + var tempPath = handler.CreateTemporaryFile(destinationPath); + var match = Regex.Match(tempPath, @"\.s3tmp\.([A-Z2-7]{8})$"); + var id = match.Groups[1].Value; + + // Assert - No problematic characters (/, \, :, *, ?, ", <, >, |) + var problematicChars = new[] { '/', '\\', ':', '*', '?', '"', '<', '>', '|' }; + Assert.IsFalse(id.Any(c => problematicChars.Contains(c))); + } + + #endregion + + #region Dispose Tests + + [TestMethod] + public void Dispose_CallsCleanupOnUncommittedFile() + { + // Arrange + var handler = new AtomicFileHandler(); + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var tempPath = handler.CreateTemporaryFile(destinationPath); + + // Act + handler.Dispose(); + + // Assert + Assert.IsFalse(File.Exists(tempPath)); + } + + [TestMethod] + public void Dispose_CanBeCalledMultipleTimes() + { + // Arrange + var handler = new AtomicFileHandler(); + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + handler.CreateTemporaryFile(destinationPath); + + // Act & Assert - Should not throw + handler.Dispose(); + handler.Dispose(); + handler.Dispose(); + } + + [TestMethod] + public void Dispose_DoesNotCleanupCommittedFiles() + { + // Arrange + var handler = new AtomicFileHandler(); + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var tempPath = handler.CreateTemporaryFile(destinationPath); + File.WriteAllText(tempPath, "content"); + handler.CommitFile(tempPath, destinationPath); + + // Act + handler.Dispose(); + + // Assert + Assert.IsTrue(File.Exists(destinationPath)); + Assert.AreEqual("content", File.ReadAllText(destinationPath)); + } + + [TestMethod] + public void Dispose_SafeAfterManualCleanup() + { + // Arrange + var handler = new AtomicFileHandler(); + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + handler.CreateTemporaryFile(destinationPath); + handler.CleanupOnFailure(); + + // Act & Assert - Should not throw + handler.Dispose(); + } + + #endregion + + #region Integration Tests + + [TestMethod] + public void Integration_CreateWriteCommit_Success() + { + // Arrange + var handler = new AtomicFileHandler(); + var destinationPath = Path.Combine(_testDirectory, "final.dat"); + var testData = "Integration test content"; + + // Act + var tempPath = handler.CreateTemporaryFile(destinationPath); + File.WriteAllText(tempPath, testData); + handler.CommitFile(tempPath, destinationPath); + + // Assert + Assert.IsTrue(File.Exists(destinationPath)); + Assert.IsFalse(File.Exists(tempPath)); + Assert.AreEqual(testData, File.ReadAllText(destinationPath)); + } + + [TestMethod] + public void Integration_CreateWriteCleanup_Success() + { + // Arrange + var handler = new AtomicFileHandler(); + var destinationPath = Path.Combine(_testDirectory, "temp.dat"); + + // Act + var tempPath = handler.CreateTemporaryFile(destinationPath); + File.WriteAllText(tempPath, "temporary content"); + handler.CleanupOnFailure(); + + // Assert + Assert.IsFalse(File.Exists(tempPath)); + Assert.IsFalse(File.Exists(destinationPath)); + } + + [TestMethod] + public void Integration_ConcurrentTempFileCreation_AllFilesUnique() + { + // Arrange + var handlers = Enumerable.Range(0, 10).Select(_ 
=> new AtomicFileHandler()).ToArray();
+            var destinationPath = Path.Combine(_testDirectory, "concurrent.dat");
+
+            // Act
+            var tempPaths = handlers.Select(h => h.CreateTemporaryFile(destinationPath)).ToArray();
+
+            // Assert - All paths should be unique
+            Assert.AreEqual(10, tempPaths.Distinct().Count());
+            Assert.IsTrue(tempPaths.All(File.Exists));
+
+            // Cleanup
+            foreach (var handler in handlers)
+            {
+                handler.Dispose();
+            }
+        }
+
+        [TestMethod]
+        public void Integration_VerifyFileAtomicity_NoPartialWrites()
+        {
+            // Arrange
+            var handler = new AtomicFileHandler();
+            var destinationPath = Path.Combine(_testDirectory, "atomic.dat");
+            var largeData = new string('A', 1024 * 1024); // 1MB of data
+
+            // Act
+            var tempPath = handler.CreateTemporaryFile(destinationPath);
+            File.WriteAllText(tempPath, largeData);
+            handler.CommitFile(tempPath, destinationPath);
+
+            // Assert
+            var finalContent = File.ReadAllText(destinationPath);
+            Assert.AreEqual(largeData.Length, finalContent.Length);
+            Assert.IsTrue(finalContent.All(c => c == 'A'));
+        }
+
+        #endregion
+    }
+}
diff --git a/sdk/test/Services/S3/UnitTests/Custom/BufferedPartDataHandlerTests.cs b/sdk/test/Services/S3/UnitTests/Custom/BufferedPartDataHandlerTests.cs
index dcd9c2734cae..48c3e8369170 100644
--- a/sdk/test/Services/S3/UnitTests/Custom/BufferedPartDataHandlerTests.cs
+++ b/sdk/test/Services/S3/UnitTests/Custom/BufferedPartDataHandlerTests.cs
@@ -255,16 +255,15 @@ public async Task ProcessPartAsync_WithUnexpectedEOF_ThrowsIOException()
             var response = new GetObjectResponse
             {
                 ContentLength = expectedBytes, // Promise 1MB
-                ResponseStream = new MemoryStream(partData) // Only deliver 512KB
+                ResponseStream = new MemoryStream(partData), // Only deliver 512KB
+                ResponseMetadata = new Amazon.Runtime.ResponseMetadata()
             };
 
             // Act & Assert
-            var exception = await Assert.ThrowsExceptionAsync<IOException>(
+            var exception = await Assert.ThrowsExceptionAsync<Amazon.S3.Model.StreamSizeMismatchException>(
                 async () => await handler.ProcessPartAsync(1, response, CancellationToken.None));
 
             // Verify exception message contains key information
-            StringAssert.Contains(exception.Message, "Unexpected end of stream");
-            StringAssert.Contains(exception.Message, "part 1");
             StringAssert.Contains(exception.Message, expectedBytes.ToString());
             StringAssert.Contains(exception.Message, actualBytes.ToString());
         }
@@ -285,16 +284,17 @@ public async Task ProcessPartAsync_WithUnexpectedEOF_DoesNotBufferPartialData()
             var response = new GetObjectResponse
             {
                 ContentLength = expectedBytes,
-                ResponseStream = new MemoryStream(partData)
+                ResponseStream = new MemoryStream(partData),
+                ResponseMetadata = new Amazon.Runtime.ResponseMetadata()
             };
 
             // Act
             try
             {
                 await handler.ProcessPartAsync(1, response, CancellationToken.None);
-                Assert.Fail("Expected IOException was not thrown");
+                Assert.Fail("Expected StreamSizeMismatchException was not thrown");
             }
-            catch (IOException)
+            catch (Amazon.S3.Model.StreamSizeMismatchException)
             {
                 // Expected
             }
diff --git a/sdk/test/Services/S3/UnitTests/Custom/ContentRangeParserTests.cs b/sdk/test/Services/S3/UnitTests/Custom/ContentRangeParserTests.cs
new file mode 100644
index 000000000000..a3896f08616a
--- /dev/null
+++ b/sdk/test/Services/S3/UnitTests/Custom/ContentRangeParserTests.cs
@@ -0,0 +1,381 @@
+/*******************************************************************************
+ *  Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *  Licensed under the Apache License, Version 2.0 (the "License"). You may not use
+ *  this file except in compliance with the License.
A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Amazon.S3.Util; +using System; + +namespace AWSSDK.UnitTests +{ + [TestClass] + public class ContentRangeParserTests + { + #region Parse Method Tests + + [TestMethod] + [TestCategory("S3")] + public void Parse_ValidContentRange_ReturnsCorrectValues() + { + // Arrange + var contentRange = "bytes 0-5242879/52428800"; + + // Act + var (startByte, endByte, totalSize) = ContentRangeParser.Parse(contentRange); + + // Assert + Assert.AreEqual(0, startByte); + Assert.AreEqual(5242879, endByte); + Assert.AreEqual(52428800, totalSize); + } + + [TestMethod] + [TestCategory("S3")] + public void Parse_ContentRangeWithoutBytesPrefix_ReturnsCorrectValues() + { + // Arrange - After .Replace("bytes ", ""), format becomes just "0-1023/2048" + var contentRange = "0-1023/2048"; + + // Act + var (startByte, endByte, totalSize) = ContentRangeParser.Parse(contentRange); + + // Assert + Assert.AreEqual(0, startByte); + Assert.AreEqual(1023, endByte); + Assert.AreEqual(2048, totalSize); + } + + [TestMethod] + [TestCategory("S3")] + public void Parse_SingleByteRange_ReturnsCorrectValues() + { + // Arrange - Edge case: single byte + var contentRange = "bytes 0-0/1"; + + // Act + var (startByte, endByte, totalSize) = ContentRangeParser.Parse(contentRange); + + // Assert + Assert.AreEqual(0, startByte); + Assert.AreEqual(0, endByte); + Assert.AreEqual(1, totalSize); + } + + [TestMethod] + [TestCategory("S3")] + public void Parse_LastByteOfObject_ReturnsCorrectValues() + { + // Arrange - Edge case: last byte + var contentRange = "bytes 999-999/1000"; + + // Act + var (startByte, endByte, totalSize) = ContentRangeParser.Parse(contentRange); + + // Assert + Assert.AreEqual(999, startByte); + Assert.AreEqual(999, endByte); + Assert.AreEqual(1000, totalSize); + } + + [TestMethod] + [TestCategory("S3")] + public void Parse_MiddleRange_ReturnsCorrectValues() + { + // Arrange + var contentRange = "bytes 8388608-16777215/33554432"; + + // Act + var (startByte, endByte, totalSize) = ContentRangeParser.Parse(contentRange); + + // Assert + Assert.AreEqual(8388608, startByte); + Assert.AreEqual(16777215, endByte); + Assert.AreEqual(33554432, totalSize); + } + + [TestMethod] + [TestCategory("S3")] + public void Parse_LargeFileRange_ReturnsCorrectValues() + { + // Arrange - Test with large values (multi-GB file) + var contentRange = "bytes 5368709120-10737418239/53687091200"; // 50GB file + + // Act + var (startByte, endByte, totalSize) = ContentRangeParser.Parse(contentRange); + + // Assert + Assert.AreEqual(5368709120L, startByte); + Assert.AreEqual(10737418239L, endByte); + Assert.AreEqual(53687091200L, totalSize); + } + + [TestMethod] + [TestCategory("S3")] + [ExpectedException(typeof(InvalidOperationException))] + public void Parse_NullContentRange_ThrowsException() + { + // Act & Assert + ContentRangeParser.Parse(null); + } + + [TestMethod] + [TestCategory("S3")] + 
[ExpectedException(typeof(InvalidOperationException))] + public void Parse_EmptyContentRange_ThrowsException() + { + // Act & Assert + ContentRangeParser.Parse(string.Empty); + } + + [TestMethod] + [TestCategory("S3")] + [ExpectedException(typeof(InvalidOperationException))] + public void Parse_MissingSlash_ThrowsException() + { + // Arrange - Invalid format: missing slash separator + var contentRange = "bytes 0-1023"; + + // Act & Assert + ContentRangeParser.Parse(contentRange); + } + + [TestMethod] + [TestCategory("S3")] + [ExpectedException(typeof(InvalidOperationException))] + public void Parse_MissingDash_ThrowsException() + { + // Arrange - Invalid format: missing dash in range + var contentRange = "bytes 0 1023/2048"; + + // Act & Assert + ContentRangeParser.Parse(contentRange); + } + + [TestMethod] + [TestCategory("S3")] + [ExpectedException(typeof(InvalidOperationException))] + public void Parse_InvalidStartByte_ThrowsException() + { + // Arrange - Invalid: non-numeric start byte + var contentRange = "bytes abc-1023/2048"; + + // Act & Assert + ContentRangeParser.Parse(contentRange); + } + + [TestMethod] + [TestCategory("S3")] + [ExpectedException(typeof(InvalidOperationException))] + public void Parse_InvalidEndByte_ThrowsException() + { + // Arrange - Invalid: non-numeric end byte + var contentRange = "bytes 0-xyz/2048"; + + // Act & Assert + ContentRangeParser.Parse(contentRange); + } + + [TestMethod] + [TestCategory("S3")] + [ExpectedException(typeof(InvalidOperationException))] + public void Parse_InvalidTotalSize_ThrowsException() + { + // Arrange - Invalid: non-numeric total size + var contentRange = "bytes 0-1023/xyz"; + + // Act & Assert + ContentRangeParser.Parse(contentRange); + } + + [TestMethod] + [TestCategory("S3")] + [ExpectedException(typeof(InvalidOperationException))] + public void Parse_WildcardTotalSize_ThrowsException() + { + // Arrange - S3 should never return wildcard, but test handling + var contentRange = "bytes 0-1023/*"; + + // Act & Assert + ContentRangeParser.Parse(contentRange); + } + + [TestMethod] + [TestCategory("S3")] + [ExpectedException(typeof(InvalidOperationException))] + public void Parse_TooManySlashes_ThrowsException() + { + // Arrange - Invalid format: extra slashes + var contentRange = "bytes 0-1023/2048/extra"; + + // Act & Assert + ContentRangeParser.Parse(contentRange); + } + + [TestMethod] + [TestCategory("S3")] + [ExpectedException(typeof(InvalidOperationException))] + public void Parse_TooManyDashes_ThrowsException() + { + // Arrange - Invalid format: extra dashes + var contentRange = "bytes 0-512-1023/2048"; + + // Act & Assert + ContentRangeParser.Parse(contentRange); + } + + [TestMethod] + [TestCategory("S3")] + public void Parse_ExtraSpaces_ReturnsCorrectValues() + { + // Arrange - ContentRange with multiple spaces (should handle gracefully) + var contentRange = "bytes 0-1023/2048"; + + // Act + var (startByte, endByte, totalSize) = ContentRangeParser.Parse(contentRange); + + // Assert + Assert.AreEqual(0, startByte); + Assert.AreEqual(1023, endByte); + Assert.AreEqual(2048, totalSize); + } + + #endregion + + #region GetStartByte Method Tests + + [TestMethod] + [TestCategory("S3")] + public void GetStartByte_ValidContentRange_ReturnsStartByte() + { + // Arrange + var contentRange = "bytes 8388608-16777215/33554432"; + + // Act + var startByte = ContentRangeParser.GetStartByte(contentRange); + + // Assert + Assert.AreEqual(8388608, startByte); + } + + [TestMethod] + [TestCategory("S3")] + public void 
GetStartByte_ZeroStart_ReturnsZero() + { + // Arrange + var contentRange = "bytes 0-1023/2048"; + + // Act + var startByte = ContentRangeParser.GetStartByte(contentRange); + + // Assert + Assert.AreEqual(0, startByte); + } + + [TestMethod] + [TestCategory("S3")] + [ExpectedException(typeof(InvalidOperationException))] + public void GetStartByte_InvalidContentRange_ThrowsException() + { + // Arrange + var contentRange = "invalid"; + + // Act & Assert + ContentRangeParser.GetStartByte(contentRange); + } + + [TestMethod] + [TestCategory("S3")] + [ExpectedException(typeof(InvalidOperationException))] + public void GetStartByte_NullContentRange_ThrowsException() + { + // Act & Assert + ContentRangeParser.GetStartByte(null); + } + + #endregion + + #region GetTotalSize Method Tests + + [TestMethod] + [TestCategory("S3")] + public void GetTotalSize_ValidContentRange_ReturnsTotalSize() + { + // Arrange + var contentRange = "bytes 0-5242879/52428800"; + + // Act + var totalSize = ContentRangeParser.GetTotalSize(contentRange); + + // Assert + Assert.AreEqual(52428800, totalSize); + } + + [TestMethod] + [TestCategory("S3")] + public void GetTotalSize_SingleByte_ReturnsOne() + { + // Arrange + var contentRange = "bytes 0-0/1"; + + // Act + var totalSize = ContentRangeParser.GetTotalSize(contentRange); + + // Assert + Assert.AreEqual(1, totalSize); + } + + [TestMethod] + [TestCategory("S3")] + [ExpectedException(typeof(InvalidOperationException))] + public void GetTotalSize_InvalidContentRange_ThrowsException() + { + // Arrange + var contentRange = "invalid"; + + // Act & Assert + ContentRangeParser.GetTotalSize(contentRange); + } + + [TestMethod] + [TestCategory("S3")] + [ExpectedException(typeof(InvalidOperationException))] + public void GetTotalSize_NullContentRange_ThrowsException() + { + // Act & Assert + ContentRangeParser.GetTotalSize(null); + } + + [TestMethod] + [TestCategory("S3")] + public void GetTotalSize_LargeFile_ReturnsCorrectSize() + { + // Arrange - Test with very large file (>50GB) + var contentRange = "bytes 0-8388607/53687091200"; + + // Act + var totalSize = ContentRangeParser.GetTotalSize(contentRange); + + // Assert + Assert.AreEqual(53687091200L, totalSize); + } + + #endregion + } +} diff --git a/sdk/test/Services/S3/UnitTests/Custom/FileDownloadConfigurationTests.cs b/sdk/test/Services/S3/UnitTests/Custom/FileDownloadConfigurationTests.cs new file mode 100644 index 000000000000..afc818d8906f --- /dev/null +++ b/sdk/test/Services/S3/UnitTests/Custom/FileDownloadConfigurationTests.cs @@ -0,0 +1,334 @@ +using Amazon.S3.Transfer.Internal; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using System; +using System.IO; + +namespace AWSSDK.UnitTests +{ + [TestClass] + public class FileDownloadConfigurationTests + { + #region Constructor Tests - Valid Parameters + + [TestMethod] + public void Constructor_WithValidParameters_CreatesConfiguration() + { + // Arrange + var concurrentRequests = 5; + var bufferSize = 8192; + var partSize = 8 * 1024 * 1024; + var destinationPath = "test.dat"; + + // Act + var config = new FileDownloadConfiguration( + concurrentRequests, + bufferSize, + partSize, + destinationPath); + + // Assert + Assert.IsNotNull(config); + } + + [TestMethod] + public void Constructor_SetsConcurrentServiceRequests() + { + // Arrange + var expectedConcurrentRequests = 10; + + // Act + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + concurrentRequests: expectedConcurrentRequests); + + // Assert + Assert.AreEqual(expectedConcurrentRequests, 
config.ConcurrentServiceRequests); + } + + [TestMethod] + public void Constructor_SetsTargetPartSizeBytes() + { + // Arrange + var expectedPartSize = 16 * 1024 * 1024; + + // Act + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + partSize: expectedPartSize); + + // Assert + Assert.AreEqual(expectedPartSize, config.TargetPartSizeBytes); + } + + [TestMethod] + public void Constructor_SetsBufferSize() + { + // Arrange + var expectedBufferSize = 16384; + + // Act + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + bufferSize: expectedBufferSize); + + // Assert + Assert.AreEqual(expectedBufferSize, config.BufferSize); + } + + [TestMethod] + public void Constructor_SetsDestinationFilePath() + { + // Arrange + var expectedPath = Path.Combine(Path.GetTempPath(), "test-file.dat"); + + // Act + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: expectedPath); + + // Assert + Assert.AreEqual(expectedPath, config.DestinationFilePath); + } + + #endregion + + #region Constructor Tests - Parameter Validation + + [TestMethod] + [ExpectedException(typeof(ArgumentOutOfRangeException))] + public void Constructor_WithZeroConcurrentServiceRequests_ThrowsArgumentOutOfRangeException() + { + // Act + var config = new FileDownloadConfiguration(0, 8192, 8 * 1024 * 1024, "test.dat"); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentOutOfRangeException))] + public void Constructor_WithNegativeConcurrentServiceRequests_ThrowsArgumentOutOfRangeException() + { + // Act + var config = new FileDownloadConfiguration(-1, 8192, 8 * 1024 * 1024, "test.dat"); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentOutOfRangeException))] + public void Constructor_WithZeroBufferSize_ThrowsArgumentOutOfRangeException() + { + // Act + var config = new FileDownloadConfiguration(10, 0, 8 * 1024 * 1024, "test.dat"); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentOutOfRangeException))] + public void Constructor_WithNegativeBufferSize_ThrowsArgumentOutOfRangeException() + { + // Act + var config = new FileDownloadConfiguration(10, -1, 8 * 1024 * 1024, "test.dat"); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentOutOfRangeException))] + public void Constructor_WithZeroTargetPartSizeBytes_ThrowsArgumentOutOfRangeException() + { + // Act + var config = new FileDownloadConfiguration(10, 8192, 0, "test.dat"); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentOutOfRangeException))] + public void Constructor_WithNegativeTargetPartSizeBytes_ThrowsArgumentOutOfRangeException() + { + // Act + var config = new FileDownloadConfiguration(10, 8192, -1, "test.dat"); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentException))] + public void Constructor_WithNullDestinationFilePath_ThrowsArgumentException() + { + // Act + var config = new FileDownloadConfiguration(10, 8192, 8 * 1024 * 1024, null); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentException))] + public void Constructor_WithEmptyDestinationFilePath_ThrowsArgumentException() + { + // Act + var config = new FileDownloadConfiguration(10, 8192, 8 * 1024 * 1024, ""); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentException))] + public void Constructor_WithWhitespaceDestinationFilePath_ThrowsArgumentException() + { + // Act + var config = new FileDownloadConfiguration(10, 8192, 8 * 1024 * 1024, " "); + } + + #endregion + + #region Property Tests + + [TestMethod] + public void BufferSize_PropertyGetter_ReturnsCorrectValue() + { + // 
Arrange + var expectedBufferSize = 16384; + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + bufferSize: expectedBufferSize); + + // Act + var actualBufferSize = config.BufferSize; + + // Assert + Assert.AreEqual(expectedBufferSize, actualBufferSize); + } + + [TestMethod] + public void DestinationFilePath_PropertyGetter_ReturnsCorrectValue() + { + // Arrange + var expectedPath = "test-file.dat"; + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: expectedPath); + + // Act + var actualPath = config.DestinationFilePath; + + // Assert + Assert.AreEqual(expectedPath, actualPath); + } + + [TestMethod] + public void ConcurrentServiceRequests_InheritsFromBase() + { + // Arrange + var expectedValue = 15; + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + concurrentRequests: expectedValue); + + // Act + var actualValue = config.ConcurrentServiceRequests; + + // Assert + Assert.AreEqual(expectedValue, actualValue); + } + + [TestMethod] + public void TargetPartSizeBytes_InheritsFromBase() + { + // Arrange + var expectedValue = 16 * 1024 * 1024; + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + partSize: expectedValue); + + // Act + var actualValue = config.TargetPartSizeBytes; + + // Assert + Assert.AreEqual(expectedValue, actualValue); + } + + #endregion + + #region Edge Case Tests + + [TestMethod] + public void Constructor_WithMinimumValidValues_CreatesConfiguration() + { + // Arrange & Act + var config = new FileDownloadConfiguration(1, 1, 1, "a"); + + // Assert + Assert.IsNotNull(config); + Assert.AreEqual(1, config.ConcurrentServiceRequests); + Assert.AreEqual(1, config.BufferSize); + Assert.AreEqual(1, config.TargetPartSizeBytes); + Assert.AreEqual("a", config.DestinationFilePath); + } + + [TestMethod] + public void Constructor_WithLargeBufferSize_CreatesConfiguration() + { + // Arrange + var largeBufferSize = 1024 * 1024; // 1MB buffer + + // Act + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + bufferSize: largeBufferSize); + + // Assert + Assert.AreEqual(largeBufferSize, config.BufferSize); + } + + [TestMethod] + public void Constructor_WithLargePartSize_CreatesConfiguration() + { + // Arrange + var largePartSize = 128L * 1024 * 1024; // 128MB + + // Act + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + partSize: largePartSize); + + // Assert + Assert.AreEqual(largePartSize, config.TargetPartSizeBytes); + } + + [TestMethod] + public void Constructor_WithVeryLongFilePath_CreatesConfiguration() + { + // Arrange - Create a long but valid path + var longFileName = new string('a', 200) + ".dat"; + var longPath = Path.Combine(Path.GetTempPath(), longFileName); + + // Act + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: longPath); + + // Assert + Assert.AreEqual(longPath, config.DestinationFilePath); + } + + [TestMethod] + public void Constructor_WithFilePathContainingSpecialCharacters_CreatesConfiguration() + { + // Arrange + var specialPath = Path.Combine(Path.GetTempPath(), "test-file[1]@2024.dat"); + + // Act + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: specialPath); + + // Assert + Assert.AreEqual(specialPath, config.DestinationFilePath); + } + + [TestMethod] + public void Constructor_WithUNCPath_CreatesConfiguration() + { + // Arrange + var uncPath = @"\\server\share\file.dat"; + + // Act + var config = 
MultipartDownloadTestHelpers.CreateFileDownloadConfiguration(
+                destinationPath: uncPath);
+
+            // Assert
+            Assert.AreEqual(uncPath, config.DestinationFilePath);
+        }
+
+        [TestMethod]
+        public void Constructor_WithRelativePath_CreatesConfiguration()
+        {
+            // Arrange
+            var relativePath = @".\subfolder\file.dat";
+
+            // Act
+            var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration(
+                destinationPath: relativePath);
+
+            // Assert
+            Assert.AreEqual(relativePath, config.DestinationFilePath);
+        }
+
+        #endregion
+    }
+}
diff --git a/sdk/test/Services/S3/UnitTests/Custom/FilePartDataHandlerConcurrencyTests.cs b/sdk/test/Services/S3/UnitTests/Custom/FilePartDataHandlerConcurrencyTests.cs
new file mode 100644
index 000000000000..3d0c41243648
--- /dev/null
+++ b/sdk/test/Services/S3/UnitTests/Custom/FilePartDataHandlerConcurrencyTests.cs
@@ -0,0 +1,367 @@
+using Amazon.S3.Model;
+using Amazon.S3.Transfer.Internal;
+using Microsoft.VisualStudio.TestTools.UnitTesting;
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Linq;
+using System.Threading;
+using System.Threading.Tasks;
+
+namespace AWSSDK.UnitTests
+{
+    /// <summary>
+    /// Comprehensive concurrency tests for FilePartDataHandler to validate that concurrent file writes
+    /// using FileShare.Write and different offsets don't cause data corruption.
+    ///
+    /// Each test verifies every byte matches expected patterns after concurrent writes complete.
+    /// </summary>
+    [TestClass]
+    public class FilePartDataHandlerConcurrencyTests
+    {
+        private string _testDirectory;
+
+        [TestInitialize]
+        public void Setup()
+        {
+            _testDirectory = MultipartDownloadTestHelpers.CreateTempDirectory();
+        }
+
+        [TestCleanup]
+        public void Cleanup()
+        {
+            MultipartDownloadTestHelpers.CleanupTempDirectory(_testDirectory);
+        }
+
+        #region Helper Methods
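+
+        // MultipartDownloadTestHelpers.GeneratePartSpecificData, used throughout these helpers,
+        // is defined elsewhere in the test suite and is not shown in this hunk. The local sketch
+        // below is only an assumption about its shape (hypothetical, not the real helper): a
+        // deterministic pattern seeded by the part number, so a misplaced or corrupted byte can
+        // be attributed to a specific part during byte-level verification.
+        private static byte[] GeneratePartSpecificDataSketch(int size, int partNumber)
+        {
+            var data = new byte[size];
+            for (int i = 0; i < size; i++)
+            {
+                // Hypothetical pattern; the real helper may differ.
+                data[i] = (byte)((partNumber * 31 + i) % 256);
+            }
+            return data;
+        }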
+
+        /// <summary>
+        /// Executes a concurrent write test with the specified parameters and returns the final file data.
+        /// </summary>
+        /// <param name="fileName">Name of the file to create</param>
+        /// <param name="partSize">Size of each part in bytes</param>
+        /// <param name="partCount">Number of parts to write</param>
+        /// <param name="writeOrder">Optional custom write order. If null, writes parts sequentially (1,2,3...)</param>
+        /// <returns>The final file data as byte array</returns>
+        private async Task<byte[]> ExecuteConcurrentWriteTest(
+            string fileName,
+            int partSize,
+            int partCount,
+            int[] writeOrder = null)
+        {
+            // Arrange
+            var destinationPath = Path.Combine(_testDirectory, fileName);
+            var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration(
+                partSize: partSize,
+                destinationPath: destinationPath);
+            var handler = new FilePartDataHandler(config);
+
+            await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None);
+
+            // Determine write order (default to sequential if not specified)
+            var order = writeOrder ?? Enumerable.Range(1, partCount).ToArray();
+
+            // Act - Create and execute all write tasks concurrently
+            var tasks = new Task[partCount];
+            for (int i = 0; i < partCount; i++)
+            {
+                var partNum = order[i];
+                var offset = (partNum - 1) * partSize;
+                var partData = MultipartDownloadTestHelpers.GeneratePartSpecificData(partSize, partNum);
+
+                var response = new GetObjectResponse
+                {
+                    ContentLength = partData.Length,
+                    ResponseStream = new MemoryStream(partData),
+                    ContentRange = $"bytes {offset}-{offset + partSize - 1}/{partCount * partSize}"
+                };
+
+                tasks[i] = handler.ProcessPartAsync(partNum, response, CancellationToken.None);
+            }
+
+            await Task.WhenAll(tasks);
+            handler.OnDownloadComplete(null);
+
+            // Return the final file data for verification
+            Assert.IsTrue(File.Exists(destinationPath), "Destination file should exist");
+            return File.ReadAllBytes(destinationPath);
+        }
+
+        /// <summary>
+        /// Executes a concurrent write test with varying part sizes and returns the final file data.
+        /// </summary>
+        private async Task<byte[]> ExecuteVaryingSizeTest(
+            string fileName,
+            (int PartNum, int Size, int Offset)[] partDefinitions,
+            int[] writeOrder = null)
+        {
+            // Arrange
+            var destinationPath = Path.Combine(_testDirectory, fileName);
+            var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration(
+                destinationPath: destinationPath);
+            var handler = new FilePartDataHandler(config);
+
+            await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None);
+
+            var totalSize = partDefinitions.Sum(p => p.Size);
+            var order = writeOrder ?? Enumerable.Range(0, partDefinitions.Length).ToArray();
+
+            // Act - Write parts with varying sizes
+            var tasks = new Task[partDefinitions.Length];
+            for (int i = 0; i < order.Length; i++)
+            {
+                var partIdx = order[i];
+                var part = partDefinitions[partIdx];
+                var partData = MultipartDownloadTestHelpers.GeneratePartSpecificData(part.Size, part.PartNum);
+
+                var response = new GetObjectResponse
+                {
+                    ContentLength = partData.Length,
+                    ResponseStream = new MemoryStream(partData),
+                    ContentRange = $"bytes {part.Offset}-{part.Offset + part.Size - 1}/{totalSize}"
+                };
+
+                tasks[i] = handler.ProcessPartAsync(part.PartNum, response, CancellationToken.None);
+            }
+
+            await Task.WhenAll(tasks);
+            handler.OnDownloadComplete(null);
+
+            Assert.IsTrue(File.Exists(destinationPath));
+            return File.ReadAllBytes(destinationPath);
+        }
+
+        /// <summary>
+        /// Verifies that every byte in the file matches the expected pattern for uniform part sizes.
+        /// </summary>
+        private void VerifyAllBytes(byte[] fileData, int partSize, int partCount)
+        {
+            Assert.AreEqual(partCount * partSize, fileData.Length, "File size mismatch");
+
+            for (int i = 0; i < partCount; i++)
+            {
+                var expectedData = MultipartDownloadTestHelpers.GeneratePartSpecificData(partSize, i + 1);
+                var actualData = fileData.Skip(i * partSize).Take(partSize).ToArray();
+                CollectionAssert.AreEqual(expectedData, actualData, $"Part {i + 1} data corrupted");
+            }
+        }
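+
+        // Worked example (illustrative only) of the offset arithmetic used by the helpers above:
+        // for uniform parts, part N of size S starts at offset (N - 1) * S, and the range header
+        // built in ExecuteConcurrentWriteTest is "bytes {start}-{start + S - 1}/{S * partCount}".
+        // With partSize = 4096 and partCount = 5, part 3 produces "bytes 8192-12287/20480".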
+
+        /// <summary>
+        /// Verifies that every byte in the file matches the expected pattern for varying part sizes.
+        /// </summary>
+        private void VerifyVaryingSizeBytes(byte[] fileData, (int PartNum, int Size, int Offset)[] partDefinitions)
+        {
+            var totalSize = partDefinitions.Sum(p => p.Size);
+            Assert.AreEqual(totalSize, fileData.Length, "File size mismatch");
+
+            foreach (var part in partDefinitions)
+            {
+                var expectedData = MultipartDownloadTestHelpers.GeneratePartSpecificData(part.Size, part.PartNum);
+                var actualData = fileData.Skip(part.Offset).Take(part.Size).ToArray();
+                CollectionAssert.AreEqual(expectedData, actualData,
+                    $"Part {part.PartNum} (size {part.Size} at offset {part.Offset}) corrupted");
+            }
+        }
+
+        #endregion
+
+        #region Sequential Order Tests
+
+        [TestMethod]
+        public async Task ConcurrentWrites_SequentialOrder_VerifyEveryByte()
+        {
+            // Act - Write parts in sequential order (1, 2, 3, 4, 5) concurrently
+            var partSize = 4096; // 4KB per part
+            var partCount = 5;
+            var fileData = await ExecuteConcurrentWriteTest("sequential.dat", partSize, partCount);
+
+            // Assert - Verify temp files cleaned up and every byte matches expected pattern
+            var tempFiles = Directory.GetFiles(_testDirectory, "*.s3tmp.*");
+            Assert.AreEqual(0, tempFiles.Length, "Temp files should be cleaned up after commit");
+
+            VerifyAllBytes(fileData, partSize, partCount);
+        }
+
+        [TestMethod]
+        public async Task ConcurrentWrites_SequentialOrder_LargeParts_VerifyEveryByte()
+        {
+            // Act - Write large parts concurrently in sequential order
+            var partSize = 1024 * 1024; // 1MB per part
+            var partCount = 3;
+            var fileData = await ExecuteConcurrentWriteTest("sequential-large.dat", partSize, partCount);
+
+            // Assert - Verify every byte
+            VerifyAllBytes(fileData, partSize, partCount);
+        }
+
+        #endregion
+
+        #region Reverse Order Tests
+
+        [TestMethod]
+        public async Task ConcurrentWrites_ReverseOrder_VerifyEveryByte()
+        {
+            // Act - Write parts in reverse order (5, 4, 3, 2, 1) concurrently
+            var partSize = 4096; // 4KB per part
+            var partCount = 5;
+            var reverseOrder = Enumerable.Range(1, partCount).Reverse().ToArray();
+            var fileData = await ExecuteConcurrentWriteTest("reverse.dat", partSize, partCount, reverseOrder);
+
+            // Assert - Verify every byte matches expected pattern
+            VerifyAllBytes(fileData, partSize, partCount);
+        }
+
+        [TestMethod]
+        public async Task ConcurrentWrites_ReverseOrder_LargeParts_VerifyEveryByte()
+        {
+            // Act - Write large parts in reverse order concurrently
+            var partSize = 1024 * 1024; // 1MB per part
+            var partCount = 3;
+            var reverseOrder = Enumerable.Range(1, partCount).Reverse().ToArray();
+            var fileData = await ExecuteConcurrentWriteTest("reverse-large.dat", partSize, partCount, reverseOrder);
+
+            // Assert - Verify every byte
+            VerifyAllBytes(fileData, partSize, partCount);
+        }
+
+        #endregion
+
+        #region Random Order Tests
+
+        [TestMethod]
+        public async Task ConcurrentWrites_RandomOrder_VerifyEveryByte()
+        {
+            // Act - Write parts in random order (3, 1, 7, 2, 5, 8, 4, 6) concurrently
+            var partSize = 4096; // 4KB per part
+            var partCount = 8;
+            var randomOrder = new[] { 3, 1, 7, 2, 5, 8, 4, 6 };
+            var fileData = await ExecuteConcurrentWriteTest("random.dat", partSize, partCount, randomOrder);
+
+            // Assert - Verify every byte matches expected pattern
+            VerifyAllBytes(fileData, partSize, partCount);
+        }
+
+        [TestMethod]
+        public async Task ConcurrentWrites_ComplexRandomOrder_VerifyEveryByte()
+        {
+            // Act - Write parts in complex random order concurrently
+            var partSize = 8192; // 8KB per part
+            var partCount = 12;
+            var randomOrder = new[] { 7, 2, 11, 4, 1, 9, 12, 3, 6, 10, 5, 8 };
+            var
fileData = await ExecuteConcurrentWriteTest("complex-random.dat", partSize, partCount, randomOrder); + + // Assert - Verify every byte + VerifyAllBytes(fileData, partSize, partCount); + } + + #endregion + + #region High Concurrency Tests + + [TestMethod] + public async Task ConcurrentWrites_TwentyParts_VerifyEveryByte() + { + // Act - Write 20 parts concurrently in random order + var partSize = 4096; // 4KB per part + var partCount = 20; + var randomOrder = Enumerable.Range(1, partCount).OrderBy(x => Guid.NewGuid()).ToArray(); + var fileData = await ExecuteConcurrentWriteTest("twenty-parts.dat", partSize, partCount, randomOrder); + + // Assert - Verify every byte across all 20 parts + VerifyAllBytes(fileData, partSize, partCount); + } + + [TestMethod] + public async Task ConcurrentWrites_FiftyParts_VerifyEveryByte() + { + // Act - Write 50 parts concurrently in random order + var partSize = 2048; // 2KB per part (smaller to keep test fast) + var partCount = 50; + var randomOrder = Enumerable.Range(1, partCount).OrderBy(x => Guid.NewGuid()).ToArray(); + var fileData = await ExecuteConcurrentWriteTest("fifty-parts.dat", partSize, partCount, randomOrder); + + // Assert - Verify every byte across all 50 parts + VerifyAllBytes(fileData, partSize, partCount); + } + + [TestMethod] + public async Task ConcurrentWrites_HighConcurrency_StressTest_VerifyEveryByte() + { + // Act - Write all parts concurrently with maximum parallelism (stress test) + var partSize = 4096; // 4KB per part + var partCount = 30; + var randomOrder = Enumerable.Range(1, partCount).OrderBy(x => Guid.NewGuid()).ToArray(); + var fileData = await ExecuteConcurrentWriteTest("stress-test.dat", partSize, partCount, randomOrder); + + // Assert - Verify every byte even under high contention + VerifyAllBytes(fileData, partSize, partCount); + } + + #endregion + + #region Varying Part Size Tests + + [TestMethod] + public async Task ConcurrentWrites_VaryingPartSizes_VerifyEveryByte() + { + // Act - Write parts with varying sizes (1KB, 4KB, 8KB, 2KB, 16KB) concurrently + var partSizes = new[] { 1024, 4096, 8192, 2048, 16384 }; + var offset = 0; + var partDefinitions = partSizes.Select((size, i) => + { + var part = (PartNum: i + 1, Size: size, Offset: offset); + offset += size; + return part; + }).ToArray(); + + var fileData = await ExecuteVaryingSizeTest("varying-sizes.dat", partDefinitions); + + // Assert - Verify every byte with varying sizes + VerifyVaryingSizeBytes(fileData, partDefinitions); + } + + [TestMethod] + public async Task ConcurrentWrites_VaryingSizesRandomOrder_VerifyEveryByte() + { + // Act - Write varying size parts in random order + var partDefinitions = new[] + { + (PartNum: 1, Size: 2048, Offset: 0), + (PartNum: 2, Size: 8192, Offset: 2048), + (PartNum: 3, Size: 4096, Offset: 10240), + (PartNum: 4, Size: 16384, Offset: 14336), + (PartNum: 5, Size: 1024, Offset: 30720) + }; + var randomOrder = Enumerable.Range(0, partDefinitions.Length).OrderBy(x => Guid.NewGuid()).ToArray(); + var fileData = await ExecuteVaryingSizeTest("varying-sizes-random.dat", partDefinitions, randomOrder); + + // Assert - Verify every byte across varying sizes + VerifyVaryingSizeBytes(fileData, partDefinitions); + } + + #endregion + + #region Mixed Scenario Tests + + [TestMethod] + public async Task ConcurrentWrites_MixedScenario_SmallAndLargeParts_VerifyEveryByte() + { + // Act - Write mixed size parts (100 bytes, 1MB, 500 bytes, 2MB, 1KB) in random order + var partDefinitions = new[] + { + (PartNum: 1, Size: 100, Offset: 0), + (PartNum: 2, 
Size: 1024 * 1024, Offset: 100), + (PartNum: 3, Size: 500, Offset: 100 + 1024 * 1024), + (PartNum: 4, Size: 2 * 1024 * 1024, Offset: 100 + 1024 * 1024 + 500), + (PartNum: 5, Size: 1024, Offset: 100 + 1024 * 1024 + 500 + 2 * 1024 * 1024) + }; + var randomOrder = Enumerable.Range(0, partDefinitions.Length).OrderBy(x => Guid.NewGuid()).ToArray(); + var fileData = await ExecuteVaryingSizeTest("mixed-scenario.dat", partDefinitions, randomOrder); + + // Assert - Verify every byte in mixed scenario + VerifyVaryingSizeBytes(fileData, partDefinitions); + } + + #endregion + } +} diff --git a/sdk/test/Services/S3/UnitTests/Custom/FilePartDataHandlerTests.cs b/sdk/test/Services/S3/UnitTests/Custom/FilePartDataHandlerTests.cs new file mode 100644 index 000000000000..e10005e8764f --- /dev/null +++ b/sdk/test/Services/S3/UnitTests/Custom/FilePartDataHandlerTests.cs @@ -0,0 +1,1017 @@ +using Amazon.S3.Model; +using Amazon.S3.Transfer.Internal; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using System; +using System.IO; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; + +namespace AWSSDK.UnitTests +{ + [TestClass] + public class FilePartDataHandlerTests + { + private string _testDirectory; + + [TestInitialize] + public void Setup() + { + _testDirectory = MultipartDownloadTestHelpers.CreateTempDirectory(); + } + + [TestCleanup] + public void Cleanup() + { + MultipartDownloadTestHelpers.CleanupTempDirectory(_testDirectory); + } + + #region Constructor Tests + + [TestMethod] + public void Constructor_WithValidConfig_CreatesHandler() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: Path.Combine(_testDirectory, "test.dat")); + + // Act + var handler = new FilePartDataHandler(config); + + // Assert + Assert.IsNotNull(handler); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentNullException))] + public void Constructor_WithNullConfig_ThrowsArgumentNullException() + { + // Act + var handler = new FilePartDataHandler(null); + } + + #endregion + + #region PrepareAsync Tests + + [TestMethod] + public async Task PrepareAsync_CreatesTempFile() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + var discoveryResult = new DownloadDiscoveryResult + { + TotalParts = 1, + ObjectSize = 1024 + }; + + // Act + await handler.PrepareAsync(discoveryResult, CancellationToken.None); + + // Assert - Check temp file exists with .s3tmp. 
pattern + var tempFiles = Directory.GetFiles(_testDirectory, "*.s3tmp.*"); + Assert.AreEqual(1, tempFiles.Length); + Assert.IsTrue(File.Exists(tempFiles[0])); + } + + [TestMethod] + public async Task PrepareAsync_TempFileFollowsPattern() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "myfile.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + var discoveryResult = new DownloadDiscoveryResult(); + + // Act + await handler.PrepareAsync(discoveryResult, CancellationToken.None); + + // Assert + var tempFiles = Directory.GetFiles(_testDirectory, "myfile.dat.s3tmp.*"); + Assert.AreEqual(1, tempFiles.Length); + } + + [TestMethod] + public async Task PrepareAsync_ReturnsCompletedTask() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: Path.Combine(_testDirectory, "test.dat")); + var handler = new FilePartDataHandler(config); + var discoveryResult = new DownloadDiscoveryResult(); + + // Act + var task = handler.PrepareAsync(discoveryResult, CancellationToken.None); + + // Assert + Assert.IsTrue(task.IsCompleted); + await task; + } + + #endregion + + #region ProcessPartAsync Tests - Basic Functionality + + [TestMethod] + public async Task ProcessPartAsync_WritesDataToFile() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + + var partData = MultipartDownloadTestHelpers.GenerateTestData(1024, 0); + var response = new GetObjectResponse + { + ContentLength = partData.Length, + ResponseStream = new MemoryStream(partData), + ContentRange = "bytes 0-1023/1024" + }; + + // Act + await handler.ProcessPartAsync(1, response, CancellationToken.None); + + // Assert + var tempFiles = Directory.GetFiles(_testDirectory, "*.s3tmp.*"); + Assert.AreEqual(1, tempFiles.Length); + + var writtenData = File.ReadAllBytes(tempFiles[0]); + CollectionAssert.AreEqual(partData, writtenData); + } + + [TestMethod] + public async Task ProcessPartAsync_WritesAtCorrectOffset() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + partSize: 1024, + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + + // Write part 2 (offset 1024) + var part2Data = MultipartDownloadTestHelpers.GenerateTestData(1024, 1024); + var response = new GetObjectResponse + { + ContentLength = part2Data.Length, + ResponseStream = new MemoryStream(part2Data), + ContentRange = "bytes 1024-2047/2048" + }; + + // Act + await handler.ProcessPartAsync(2, response, CancellationToken.None); + + // Assert + var tempFiles = Directory.GetFiles(_testDirectory, "*.s3tmp.*"); + var fileData = File.ReadAllBytes(tempFiles[0]); + + // Verify data is at offset 1024 + var actualPart2Data = fileData.Skip(1024).Take(1024).ToArray(); + CollectionAssert.AreEqual(part2Data, actualPart2Data); + } + + #endregion + + #region ProcessPartAsync Tests - Offset Calculation + + [TestMethod] + public async Task ProcessPartAsync_ParsesContentRangeForOffset() + { + // Arrange + var 
destinationPath = Path.Combine(_testDirectory, "test.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + + var partData = MultipartDownloadTestHelpers.GenerateTestData(100, 0); + var response = new GetObjectResponse + { + ContentLength = partData.Length, + ResponseStream = new MemoryStream(partData), + ContentRange = "bytes 8388608-8388707/33555032" // Offset 8MB + }; + + // Act + await handler.ProcessPartAsync(2, response, CancellationToken.None); + + // Assert + var tempFiles = Directory.GetFiles(_testDirectory, "*.s3tmp.*"); + var fileData = File.ReadAllBytes(tempFiles[0]); + + // Verify data is at offset 8388608 + var actualData = fileData.Skip(8388608).Take(100).ToArray(); + CollectionAssert.AreEqual(partData, actualData); + } + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public async Task ProcessPartAsync_MissingContentRange_ThrowsInvalidOperationException() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + partSize: 1000, + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + + var partData = MultipartDownloadTestHelpers.GenerateTestData(100, 0); + var response = new GetObjectResponse + { + ContentLength = partData.Length, + ResponseStream = new MemoryStream(partData), + ContentRange = null // Missing ContentRange should throw + }; + + // Act - Should throw InvalidOperationException + await handler.ProcessPartAsync(3, response, CancellationToken.None); + } + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public async Task ProcessPartAsync_InvalidContentRange_ThrowsInvalidOperationException() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + partSize: 1000, + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + + var partData = MultipartDownloadTestHelpers.GenerateTestData(100, 0); + var response = new GetObjectResponse + { + ContentLength = partData.Length, + ResponseStream = new MemoryStream(partData), + ContentRange = "invalid-format" // Invalid ContentRange should throw + }; + + // Act - Should throw InvalidOperationException + await handler.ProcessPartAsync(2, response, CancellationToken.None); + } + + #endregion + + #region ProcessPartAsync Tests - Data Integrity + + [TestMethod] + public async Task ProcessPartAsync_PreservesDataIntegrity() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + + var partData = MultipartDownloadTestHelpers.CreateMixedPattern(10240, 42); + var response = new GetObjectResponse + { + ContentLength = partData.Length, + ResponseStream = new MemoryStream(partData), + ContentRange = "bytes 0-10239/10240" + }; + + // Act + await handler.ProcessPartAsync(1, 
response, CancellationToken.None); + + // Assert + var tempFiles = Directory.GetFiles(_testDirectory, "*.s3tmp.*"); + var writtenData = File.ReadAllBytes(tempFiles[0]); + CollectionAssert.AreEqual(partData, writtenData); + } + + [TestMethod] + public async Task ProcessPartAsync_HandlesZeroByteResponse() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + + var response = new GetObjectResponse + { + ContentLength = 0, + ResponseStream = new MemoryStream(Array.Empty<byte>()), + ContentRange = "bytes 0-0/0" + }; + + // Act & Assert - Should not throw + await handler.ProcessPartAsync(1, response, CancellationToken.None); + } + + [TestMethod] + public async Task ProcessPartAsync_HandlesSmallPart() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + + var partData = MultipartDownloadTestHelpers.GenerateTestData(100, 0); + var response = new GetObjectResponse + { + ContentLength = partData.Length, + ResponseStream = new MemoryStream(partData), + ContentRange = "bytes 0-99/100" + }; + + // Act + await handler.ProcessPartAsync(1, response, CancellationToken.None); + + // Assert + var tempFiles = Directory.GetFiles(_testDirectory, "*.s3tmp.*"); + var writtenData = File.ReadAllBytes(tempFiles[0]); + CollectionAssert.AreEqual(partData, writtenData); + } + + [TestMethod] + public async Task ProcessPartAsync_HandlesLargePart() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + + var partSize = 16 * 1024 * 1024; // 16MB + var partData = MultipartDownloadTestHelpers.GenerateTestData(partSize, 0); + var response = new GetObjectResponse + { + ContentLength = partData.Length, + ResponseStream = new MemoryStream(partData), + ContentRange = $"bytes 0-{partSize - 1}/{partSize}" + }; + + // Act + await handler.ProcessPartAsync(1, response, CancellationToken.None); + + // Assert + var tempFiles = Directory.GetFiles(_testDirectory, "*.s3tmp.*"); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyFileSize(tempFiles[0], partSize)); + } + + [TestMethod] + public async Task ProcessPartAsync_MultipleWritesPreserveAllData() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + partSize: 1024, + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + + // Write part 1 + var part1Data = MultipartDownloadTestHelpers.GenerateTestData(1024, 0); + var response1 = new GetObjectResponse + { + ContentLength = part1Data.Length, + ResponseStream = new MemoryStream(part1Data), + ContentRange = "bytes 0-1023/2048" + }; + await handler.ProcessPartAsync(1, response1, 
CancellationToken.None); + + // Write part 2 + var part2Data = MultipartDownloadTestHelpers.GenerateTestData(1024, 1024); + var response2 = new GetObjectResponse + { + ContentLength = part2Data.Length, + ResponseStream = new MemoryStream(part2Data), + ContentRange = "bytes 1024-2047/2048" + }; + await handler.ProcessPartAsync(2, response2, CancellationToken.None); + + // Assert + var tempFiles = Directory.GetFiles(_testDirectory, "*.s3tmp.*"); + var fileData = File.ReadAllBytes(tempFiles[0]); + + var actualPart1 = fileData.Take(1024).ToArray(); + var actualPart2 = fileData.Skip(1024).Take(1024).ToArray(); + + CollectionAssert.AreEqual(part1Data, actualPart1); + CollectionAssert.AreEqual(part2Data, actualPart2); + } + + #endregion + + #region ProcessPartAsync Tests - Concurrent Writes + + [TestMethod] + public async Task ProcessPartAsync_SupportsConcurrentWrites() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + partSize: 1024, + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + + // Create multiple parts + var part1Data = MultipartDownloadTestHelpers.GenerateTestData(1024, 0); + var part2Data = MultipartDownloadTestHelpers.GenerateTestData(1024, 1024); + var part3Data = MultipartDownloadTestHelpers.GenerateTestData(1024, 2048); + + var response1 = new GetObjectResponse + { + ContentLength = part1Data.Length, + ResponseStream = new MemoryStream(part1Data), + ContentRange = "bytes 0-1023/3072" + }; + var response2 = new GetObjectResponse + { + ContentLength = part2Data.Length, + ResponseStream = new MemoryStream(part2Data), + ContentRange = "bytes 1024-2047/3072" + }; + var response3 = new GetObjectResponse + { + ContentLength = part3Data.Length, + ResponseStream = new MemoryStream(part3Data), + ContentRange = "bytes 2048-3071/3072" + }; + + // Act - Write all parts concurrently + var tasks = new[] + { + handler.ProcessPartAsync(1, response1, CancellationToken.None), + handler.ProcessPartAsync(2, response2, CancellationToken.None), + handler.ProcessPartAsync(3, response3, CancellationToken.None) + }; + await Task.WhenAll(tasks); + + // Assert + var tempFiles = Directory.GetFiles(_testDirectory, "*.s3tmp.*"); + var fileData = File.ReadAllBytes(tempFiles[0]); + + var actualPart1 = fileData.Take(1024).ToArray(); + var actualPart2 = fileData.Skip(1024).Take(1024).ToArray(); + var actualPart3 = fileData.Skip(2048).Take(1024).ToArray(); + + CollectionAssert.AreEqual(part1Data, actualPart1); + CollectionAssert.AreEqual(part2Data, actualPart2); + CollectionAssert.AreEqual(part3Data, actualPart3); + } + + [TestMethod] + public async Task ProcessPartAsync_ConcurrentWritesDontInterfere() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + partSize: 1024, + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + + // Create 10 parts with distinct patterns + var tasks = new Task[10]; + for (int i = 0; i < 10; i++) + { + var partNum = i + 1; + var offset = i * 1024; + var partData = MultipartDownloadTestHelpers.GeneratePartSpecificData(1024, partNum); + + var response = new GetObjectResponse + { + ContentLength = partData.Length, + ResponseStream = 
new MemoryStream(partData), + ContentRange = $"bytes {offset}-{offset + 1023}/10240" + }; + + tasks[i] = handler.ProcessPartAsync(partNum, response, CancellationToken.None); + } + + // Act + await Task.WhenAll(tasks); + + // Assert - Each part should have its distinct pattern + var tempFiles = Directory.GetFiles(_testDirectory, "*.s3tmp.*"); + var fileData = File.ReadAllBytes(tempFiles[0]); + + for (int i = 0; i < 10; i++) + { + var expectedData = MultipartDownloadTestHelpers.GeneratePartSpecificData(1024, i + 1); + var actualData = fileData.Skip(i * 1024).Take(1024).ToArray(); + CollectionAssert.AreEqual(expectedData, actualData, $"Part {i + 1} data mismatch"); + } + } + + #endregion + + #region ProcessPartAsync Tests - Error Handling + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public async Task ProcessPartAsync_WithoutPrepare_ThrowsInvalidOperationException() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: Path.Combine(_testDirectory, "test.dat")); + var handler = new FilePartDataHandler(config); + + var partData = MultipartDownloadTestHelpers.GenerateTestData(1024, 0); + var response = new GetObjectResponse + { + ContentLength = partData.Length, + ResponseStream = new MemoryStream(partData), + ContentRange = "bytes 0-1023/1024" + }; + + // Act - Without calling PrepareAsync first + await handler.ProcessPartAsync(1, response, CancellationToken.None); + } + + #endregion + + #region ProcessPartAsync Tests - Cancellation + + [TestMethod] + [ExpectedException(typeof(TaskCanceledException))] + public async Task ProcessPartAsync_WithCancelledToken_ThrowsTaskCanceledException() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + + var partData = MultipartDownloadTestHelpers.GenerateTestData(1024, 0); + var response = new GetObjectResponse + { + ContentLength = partData.Length, + ResponseStream = new MemoryStream(partData), + ContentRange = "bytes 0-1023/1024" + }; + + var cts = new CancellationTokenSource(); + cts.Cancel(); + + // Act + await handler.ProcessPartAsync(1, response, cts.Token); + } + + #endregion + + #region WaitForCapacityAsync Tests + + [TestMethod] + public async Task WaitForCapacityAsync_ReturnsImmediately() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: Path.Combine(_testDirectory, "test.dat")); + var handler = new FilePartDataHandler(config); + + // Act + var task = handler.WaitForCapacityAsync(CancellationToken.None); + + // Assert + Assert.IsTrue(task.IsCompleted); + await task; + } + + [TestMethod] + public async Task WaitForCapacityAsync_CanBeCalledMultipleTimes() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: Path.Combine(_testDirectory, "test.dat")); + var handler = new FilePartDataHandler(config); + + // Act & Assert + await handler.WaitForCapacityAsync(CancellationToken.None); + await handler.WaitForCapacityAsync(CancellationToken.None); + await handler.WaitForCapacityAsync(CancellationToken.None); + } + + #endregion + + #region ReleaseCapacity Tests + + [TestMethod] + public void ReleaseCapacity_DoesNotThrow() + { + // Arrange + var config = 
MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: Path.Combine(_testDirectory, "test.dat")); + var handler = new FilePartDataHandler(config); + + // Act & Assert + handler.ReleaseCapacity(); + } + + [TestMethod] + public void ReleaseCapacity_CanBeCalledMultipleTimes() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: Path.Combine(_testDirectory, "test.dat")); + var handler = new FilePartDataHandler(config); + + // Act & Assert + handler.ReleaseCapacity(); + handler.ReleaseCapacity(); + handler.ReleaseCapacity(); + } + + #endregion + + #region OnDownloadComplete Tests - Success Path + + [TestMethod] + public async Task OnDownloadComplete_WithSuccess_CommitsTempFile() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "final.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + + var partData = MultipartDownloadTestHelpers.GenerateTestData(1024, 0); + var response = new GetObjectResponse + { + ContentLength = partData.Length, + ResponseStream = new MemoryStream(partData), + ContentRange = "bytes 0-1023/1024" + }; + await handler.ProcessPartAsync(1, response, CancellationToken.None); + + // Act + handler.OnDownloadComplete(null); // null = success + + // Assert + Assert.IsTrue(File.Exists(destinationPath)); + Assert.AreEqual(0, Directory.GetFiles(_testDirectory, "*.s3tmp.*").Length); + + var finalData = File.ReadAllBytes(destinationPath); + CollectionAssert.AreEqual(partData, finalData); + } + + [TestMethod] + public async Task OnDownloadComplete_WithSuccess_DestinationContainsAllData() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "complete.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + partSize: 1024, + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + + // Write 3 parts + for (int i = 0; i < 3; i++) + { + var partData = MultipartDownloadTestHelpers.GenerateTestData(1024, i * 1024); + var response = new GetObjectResponse + { + ContentLength = partData.Length, + ResponseStream = new MemoryStream(partData), + ContentRange = $"bytes {i * 1024}-{(i + 1) * 1024 - 1}/3072" + }; + await handler.ProcessPartAsync(i + 1, response, CancellationToken.None); + } + + // Act + handler.OnDownloadComplete(null); + + // Assert + Assert.IsTrue(File.Exists(destinationPath)); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyMultipartFileContents( + destinationPath, 3, 1024, 0)); + } + + #endregion + + #region OnDownloadComplete Tests - Failure Path + + [TestMethod] + public async Task OnDownloadComplete_WithFailure_CleansTempFile() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "failed.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + + // Act + handler.OnDownloadComplete(new Exception("Download failed")); + + // Assert + Assert.IsFalse(File.Exists(destinationPath)); + Assert.AreEqual(0, Directory.GetFiles(_testDirectory, "*.s3tmp.*").Length); + } + + [TestMethod] + public async Task 
OnDownloadComplete_WithDifferentExceptions_AllHandledCorrectly() + { + // Test with OperationCanceledException + var destinationPath1 = Path.Combine(_testDirectory, "cancelled.dat"); + var config1 = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: destinationPath1); + var handler1 = new FilePartDataHandler(config1); + await handler1.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + handler1.OnDownloadComplete(new OperationCanceledException()); + Assert.IsFalse(File.Exists(destinationPath1)); + + // Test with IOException + var destinationPath2 = Path.Combine(_testDirectory, "ioerror.dat"); + var config2 = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: destinationPath2); + var handler2 = new FilePartDataHandler(config2); + await handler2.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + handler2.OnDownloadComplete(new IOException("IO error")); + Assert.IsFalse(File.Exists(destinationPath2)); + } + + #endregion + + #region Dispose Tests + + [TestMethod] + public async Task Dispose_CleansUpUncommittedFile() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "disposed.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + + // Act + handler.Dispose(); + + // Assert - Temp file should be cleaned up, destination should not exist + Assert.AreEqual(0, Directory.GetFiles(_testDirectory, "*.s3tmp.*").Length); + Assert.IsFalse(File.Exists(destinationPath)); + } + + [TestMethod] + public async Task Dispose_AfterCommit_DoesNotDeleteDestination() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "committed.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + + var partData = MultipartDownloadTestHelpers.GenerateTestData(1024, 0); + var response = new GetObjectResponse + { + ContentLength = partData.Length, + ResponseStream = new MemoryStream(partData), + ContentRange = "bytes 0-1023/1024" + }; + await handler.ProcessPartAsync(1, response, CancellationToken.None); + + handler.OnDownloadComplete(null); // Commit + + // Act + handler.Dispose(); + + // Assert - Destination should still exist + Assert.IsTrue(File.Exists(destinationPath)); + var finalData = File.ReadAllBytes(destinationPath); + CollectionAssert.AreEqual(partData, finalData); + } + + [TestMethod] + public void Dispose_CanBeCalledMultipleTimes() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: Path.Combine(_testDirectory, "test.dat")); + var handler = new FilePartDataHandler(config); + + // Act & Assert - Multiple calls should not throw + handler.Dispose(); + handler.Dispose(); + handler.Dispose(); + } + + [TestMethod] + public async Task Dispose_WithoutPrepare_DoesNotThrow() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: Path.Combine(_testDirectory, "test.dat")); + var handler = new FilePartDataHandler(config); + + // Act & Assert - Should not throw even if PrepareAsync was never called + handler.Dispose(); + } + + #endregion + + #region Integration Tests + + 
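+ // The integration tests below drive the complete handler lifecycle end to end. As a
+ // sketch of the sequence they exercise (using only the FilePartDataHandler API already
+ // shown in this file):
+ //
+ //     var handler = new FilePartDataHandler(config);
+ //     await handler.PrepareAsync(discoveryResult, ct);          // create the *.s3tmp.* temp file
+ //     await handler.ProcessPartAsync(partNumber, response, ct); // write each part at its ContentRange offset
+ //     handler.OnDownloadComplete(null);                         // null = success; commit temp file to destination
+ //     handler.Dispose();                                        // delete the temp file if it was never committed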
[TestMethod] + public async Task Integration_CompleteWorkflow_ProducesCorrectFile() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "integration.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + partSize: 1024, + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + // Act - Simulate complete download workflow + await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + + // Download 5 parts + for (int i = 0; i < 5; i++) + { + var partData = MultipartDownloadTestHelpers.GenerateTestData(1024, i * 1024); + var response = new GetObjectResponse + { + ContentLength = partData.Length, + ResponseStream = new MemoryStream(partData), + ContentRange = $"bytes {i * 1024}-{(i + 1) * 1024 - 1}/5120" + }; + await handler.ProcessPartAsync(i + 1, response, CancellationToken.None); + } + + handler.OnDownloadComplete(null); + handler.Dispose(); + + // Assert + Assert.IsTrue(File.Exists(destinationPath)); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyMultipartFileContents( + destinationPath, 5, 1024, 0)); + Assert.AreEqual(0, Directory.GetFiles(_testDirectory, "*.s3tmp.*").Length); + } + + [TestMethod] + public async Task Integration_ParallelDownload_ProducesCorrectFile() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "parallel.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + partSize: 1024, + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + + // Act - Download parts in parallel (reverse order to test offset handling) + var tasks = new Task[5]; + for (int i = 4; i >= 0; i--) + { + var partNum = i + 1; + var offset = i * 1024; + var partData = MultipartDownloadTestHelpers.GenerateTestData(1024, offset); + var response = new GetObjectResponse + { + ContentLength = partData.Length, + ResponseStream = new MemoryStream(partData), + ContentRange = $"bytes {offset}-{offset + 1023}/5120" + }; + tasks[4 - i] = handler.ProcessPartAsync(partNum, response, CancellationToken.None); + } + await Task.WhenAll(tasks); + + handler.OnDownloadComplete(null); + handler.Dispose(); + + // Assert + Assert.IsTrue(File.Exists(destinationPath)); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyMultipartFileContents( + destinationPath, 5, 1024, 0)); + } + + [TestMethod] + public async Task Integration_FailedDownload_CleansUpProperly() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "failed-integration.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + // Act + await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + + var partData = MultipartDownloadTestHelpers.GenerateTestData(1024, 0); + var response = new GetObjectResponse + { + ContentLength = partData.Length, + ResponseStream = new MemoryStream(partData), + ContentRange = "bytes 0-1023/1024" + }; + await handler.ProcessPartAsync(1, response, CancellationToken.None); + + handler.OnDownloadComplete(new Exception("Simulated failure")); + handler.Dispose(); + + // Assert - No files should remain + Assert.IsFalse(File.Exists(destinationPath)); + Assert.AreEqual(0, Directory.GetFiles(_testDirectory, "*.s3tmp.*").Length); + } + + [TestMethod] + public async Task Integration_LargeFileDownload_HandlesCorrectly() + { 
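+ // 1MB parts keep this test fast; the same Prepare/ProcessPart/OnDownloadComplete
+ // sequence applies regardless of part size, so only the sizes below differ from
+ // the smaller integration tests above.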
+ // Arrange + var destinationPath = Path.Combine(_testDirectory, "large-integration.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + partSize: 1024 * 1024, // 1MB parts + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + + // Act - Download 3 parts of 1MB each + for (int i = 0; i < 3; i++) + { + var partSize = 1024 * 1024; + var offset = i * partSize; + var partData = MultipartDownloadTestHelpers.GenerateTestData(partSize, offset); + var response = new GetObjectResponse + { + ContentLength = partData.Length, + ResponseStream = new MemoryStream(partData), + ContentRange = $"bytes {offset}-{offset + partSize - 1}/{3 * partSize}" + }; + await handler.ProcessPartAsync(i + 1, response, CancellationToken.None); + } + + handler.OnDownloadComplete(null); + handler.Dispose(); + + // Assert + Assert.IsTrue(File.Exists(destinationPath)); + var expectedSize = 3 * 1024 * 1024; + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyFileSize(destinationPath, expectedSize)); + } + + [TestMethod] + public async Task Integration_SingleByteFile_HandlesCorrectly() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "single-byte.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + + // Act - Download single byte + var partData = new byte[] { 0x42 }; + var response = new GetObjectResponse + { + ContentLength = 1, + ResponseStream = new MemoryStream(partData), + ContentRange = "bytes 0-0/1" + }; + await handler.ProcessPartAsync(1, response, CancellationToken.None); + + handler.OnDownloadComplete(null); + handler.Dispose(); + + // Assert + Assert.IsTrue(File.Exists(destinationPath)); + var fileData = File.ReadAllBytes(destinationPath); + Assert.AreEqual(1, fileData.Length); + Assert.AreEqual(0x42, fileData[0]); + } + + #endregion + } +} diff --git a/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadCommandTests.cs b/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadCommandTests.cs new file mode 100644 index 000000000000..bacc470411b9 --- /dev/null +++ b/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadCommandTests.cs @@ -0,0 +1,796 @@ +using Amazon.S3; +using Amazon.S3.Model; +using Amazon.S3.Transfer; +using Amazon.S3.Transfer.Internal; +using Amazon.S3.Util; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Moq; +using System; +using System.IO; +using System.Threading; +using System.Threading.Tasks; + +namespace AWSSDK.UnitTests +{ + [TestClass] + public class MultipartDownloadCommandTests + { + private string _testDirectory; + private Mock<IAmazonS3> _mockS3Client; + private TransferUtilityConfig _config; + + [TestInitialize] + public void Setup() + { + _testDirectory = MultipartDownloadTestHelpers.CreateTempDirectory(); + _mockS3Client = new Mock<IAmazonS3>(); + _config = new TransferUtilityConfig + { + ConcurrentServiceRequests = 4 + }; + + // Setup default S3 client config + var s3Config = new AmazonS3Config + { + BufferSize = 8192, + }; + _mockS3Client.Setup(c => c.Config).Returns(s3Config); + } + + [TestCleanup] + public void Cleanup() + { + MultipartDownloadTestHelpers.CleanupTempDirectory(_testDirectory); + } + + #region Constructor Tests + + [TestMethod] + public void Constructor_WithValidParameters_CreatesCommand() + { + // 
Arrange + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: Path.Combine(_testDirectory, "test.dat")); + + // Act + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Assert + Assert.IsNotNull(command); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentNullException))] + public void Constructor_WithNullS3Client_ThrowsArgumentNullException() + { + // Arrange + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: Path.Combine(_testDirectory, "test.dat")); + + // Act + var command = new MultipartDownloadCommand(null, request, _config); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentNullException))] + public void Constructor_WithNullRequest_ThrowsArgumentNullException() + { + // Act + var command = new MultipartDownloadCommand(_mockS3Client.Object, null, _config); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentNullException))] + public void Constructor_WithNullConfig_ThrowsArgumentNullException() + { + // Arrange + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: Path.Combine(_testDirectory, "test.dat")); + + // Act + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, null); + } + + #endregion + + #region ValidateRequest Tests + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public async Task ExecuteAsync_WithMissingBucketName_ThrowsInvalidOperationException() + { + // Arrange + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + bucketName: null, + filePath: Path.Combine(_testDirectory, "test.dat")); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + await command.ExecuteAsync(CancellationToken.None); + } + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public async Task ExecuteAsync_WithEmptyBucketName_ThrowsInvalidOperationException() + { + // Arrange + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + bucketName: "", + filePath: Path.Combine(_testDirectory, "test.dat")); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + await command.ExecuteAsync(CancellationToken.None); + } + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public async Task ExecuteAsync_WithMissingKey_ThrowsInvalidOperationException() + { + // Arrange + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + key: null, + filePath: Path.Combine(_testDirectory, "test.dat")); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + await command.ExecuteAsync(CancellationToken.None); + } + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public async Task ExecuteAsync_WithEmptyKey_ThrowsInvalidOperationException() + { + // Arrange + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + key: "", + filePath: Path.Combine(_testDirectory, "test.dat")); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + await command.ExecuteAsync(CancellationToken.None); + } + +#if BCL + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public async Task ExecuteAsync_WithMissingFilePath_ThrowsInvalidOperationException() + { + // Arrange + var request = MultipartDownloadTestHelpers.CreateDownloadRequest(filePath: null); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // 
Act + await command.ExecuteAsync(CancellationToken.None); + } + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public async Task ExecuteAsync_WithEmptyFilePath_ThrowsInvalidOperationException() + { + // Arrange + var request = MultipartDownloadTestHelpers.CreateDownloadRequest(filePath: ""); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + await command.ExecuteAsync(CancellationToken.None); + } +#endif + + #endregion + + #region CreateConfiguration Tests + + [TestMethod] + public async Task ExecuteAsync_UsesRequestPartSize_WhenSet() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var customPartSize = 16 * 1024 * 1024; // 16MB + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath, + partSize: customPartSize); + + SetupSuccessfulSinglePartDownload(1024); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + await command.ExecuteAsync(CancellationToken.None); + + // Assert - Verify coordinator was called (validates config was created) + _mockS3Client.Verify(c => c.GetObjectAsync( + It.IsAny<GetObjectRequest>(), + It.IsAny<CancellationToken>()), Times.Once); + } + + [TestMethod] + public async Task ExecuteAsync_UsesDefaultPartSize_WhenNotSet() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + // Don't set PartSize - should use 8MB default + + SetupSuccessfulSinglePartDownload(1024); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + await command.ExecuteAsync(CancellationToken.None); + + // Assert - Verify coordinator was called + _mockS3Client.Verify(c => c.GetObjectAsync( + It.IsAny<GetObjectRequest>(), + It.IsAny<CancellationToken>()), Times.Once); + } + + [TestMethod] + public async Task ExecuteAsync_UsesConcurrentRequestsFromConfig() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + _config.ConcurrentServiceRequests = 10; + + SetupSuccessfulSinglePartDownload(1024); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + await command.ExecuteAsync(CancellationToken.None); + + // Assert + _mockS3Client.Verify(c => c.GetObjectAsync( + It.IsAny<GetObjectRequest>(), + It.IsAny<CancellationToken>()), Times.Once); + } + + [TestMethod] + public async Task ExecuteAsync_UsesBufferSizeFromS3ClientConfig() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + + var s3Config = new AmazonS3Config + { + BufferSize = 16384, // Custom buffer size + }; + _mockS3Client.Setup(c => c.Config).Returns(s3Config); + + SetupSuccessfulSinglePartDownload(1024); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + await command.ExecuteAsync(CancellationToken.None); + + // Assert + // Verify the command executed successfully with custom buffer size + _mockS3Client.Verify(c => c.GetObjectAsync( + It.IsAny<GetObjectRequest>(), + It.IsAny<CancellationToken>()), Times.Once); + } + + #endregion + + #region ExecuteAsync Tests - Single Part Download + + [TestMethod] + public async Task ExecuteAsync_SinglePartDownload_CompletesSuccessfully() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "single-part.dat"); + var request = 
MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + + var fileSize = 1024; + SetupSuccessfulSinglePartDownload(fileSize); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.IsTrue(File.Exists(destinationPath)); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyFileSize(destinationPath, fileSize)); + } + + [TestMethod] + public async Task ExecuteAsync_SinglePartDownload_SetsContentLengthCorrectly() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + + var fileSize = 2048; + SetupSuccessfulSinglePartDownload(fileSize); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.AreEqual(fileSize, response.Headers.ContentLength); + } + + [TestMethod] + public async Task ExecuteAsync_SinglePartDownload_SetsContentRangeCorrectly() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + + var fileSize = 1024; + SetupSuccessfulSinglePartDownload(fileSize); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.AreEqual($"bytes 0-{fileSize - 1}/{fileSize}", response.ContentRange); + } + + #endregion + + #region ExecuteAsync Tests - Response Mapping + + [TestMethod] + public async Task ExecuteAsync_MapsETagCorrectly() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + + var expectedETag = "\"abc123def456\""; + SetupSuccessfulSinglePartDownload(1024, eTag: expectedETag); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.AreEqual(expectedETag, response.ETag); + } + + [TestMethod] + public async Task ExecuteAsync_MapsServerSideEncryptionMethodCorrectly() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + + SetupSuccessfulSinglePartDownload(1024, + serverSideEncryptionMethod: ServerSideEncryptionMethod.AES256); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.AreEqual(ServerSideEncryptionMethod.AES256, response.ServerSideEncryptionMethod); + } + + [TestMethod] + public async Task ExecuteAsync_MapsServerSideEncryptionKeyManagementServiceKeyId() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + + var kmsKeyId = "arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012"; + SetupSuccessfulSinglePartDownload(1024, + serverSideEncryptionKeyManagementServiceKeyId: kmsKeyId); + var command = new 
MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.AreEqual(kmsKeyId, response.ServerSideEncryptionKeyManagementServiceKeyId); + } + + [TestMethod] + public async Task ExecuteAsync_MapsServerSideEncryptionCustomerMethod() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + + SetupSuccessfulSinglePartDownload(1024, + serverSideEncryptionCustomerMethod: ServerSideEncryptionCustomerMethod.AES256); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.AreEqual(ServerSideEncryptionCustomerMethod.AES256, + response.ServerSideEncryptionCustomerMethod); + } + + [TestMethod] + public async Task ExecuteAsync_MapsMetadataCorrectly() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + + var metadata = new MetadataCollection(); + metadata["x-amz-meta-custom"] = "custom-value"; + SetupSuccessfulSinglePartDownload(1024, metadata: metadata); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response.Metadata); + Assert.IsTrue(response.Metadata.Count > 0); + } + + #endregion + + #region ExecuteAsync Tests - Composite Checksum Handling + + [TestMethod] + public async Task ExecuteAsync_CompositeChecksum_SetsAllChecksumsToNull() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + + SetupSuccessfulSinglePartDownload(1024, + checksumType: ChecksumType.COMPOSITE, + checksumCRC32: "somecrc32", + checksumSHA256: "somesha256"); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.AreEqual(ChecksumType.COMPOSITE, response.ChecksumType); + Assert.IsNull(response.ChecksumCRC32); + Assert.IsNull(response.ChecksumCRC32C); + Assert.IsNull(response.ChecksumCRC64NVME); + Assert.IsNull(response.ChecksumSHA1); + Assert.IsNull(response.ChecksumSHA256); + } + + [TestMethod] + public async Task ExecuteAsync_NonCompositeChecksum_PreservesChecksums() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + + var expectedCRC32 = "somecrc32value"; + SetupSuccessfulSinglePartDownload(1024, + checksumType: null, // Not composite + checksumCRC32: expectedCRC32); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.AreEqual(expectedCRC32, response.ChecksumCRC32); + } + + [TestMethod] + public async Task ExecuteAsync_NullChecksumType_DoesNotThrow() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + + 
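+ // A null ChecksumType models an object stored without additional checksums; unlike
+ // COMPOSITE (whose per-part values cannot describe a ranged download and are cleared
+ // above), the null values should flow through the mapped response unchanged.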
SetupSuccessfulSinglePartDownload(1024, checksumType: null); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act & Assert - Should not throw + var response = await command.ExecuteAsync(CancellationToken.None); + Assert.IsNotNull(response); + } + + #endregion + + #region ExecuteAsync Tests - Error Handling + + [TestMethod] + [ExpectedException(typeof(AmazonS3Exception))] + public async Task ExecuteAsync_S3ClientThrows_PropagatesException() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + + _mockS3Client.Setup(c => c.GetObjectAsync( + It.IsAny<GetObjectRequest>(), + It.IsAny<CancellationToken>())) + .ThrowsAsync(new AmazonS3Exception("S3 error")); + + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + await command.ExecuteAsync(CancellationToken.None); + } + + [TestMethod] + public async Task ExecuteAsync_Exception_CleansUpTempFiles() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + + _mockS3Client.Setup(c => c.GetObjectAsync( + It.IsAny<GetObjectRequest>(), + It.IsAny<CancellationToken>())) + .ThrowsAsync(new Exception("Download failed")); + + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + try + { + await command.ExecuteAsync(CancellationToken.None); + } + catch + { + // Expected exception + } + + // Assert - No temp files should remain + await Task.Delay(100); // Give cleanup time to complete + var tempFiles = Directory.GetFiles(_testDirectory, "*.s3tmp.*"); + Assert.AreEqual(0, tempFiles.Length); + } + + #endregion + + #region ExecuteAsync Tests - Cancellation + + [TestMethod] + [ExpectedException(typeof(OperationCanceledException))] + public async Task ExecuteAsync_WithCancelledToken_ThrowsOperationCanceledException() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + + var cts = new CancellationTokenSource(); + cts.Cancel(); + + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + await command.ExecuteAsync(cts.Token); + } + + [TestMethod] + public async Task ExecuteAsync_CancellationDuringDownload_CleansUpProperly() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + + var cts = new CancellationTokenSource(); + + // Setup mock to cancel after being called + _mockS3Client.Setup(c => c.GetObjectAsync( + It.IsAny<GetObjectRequest>(), + It.IsAny<CancellationToken>())) + .Callback(() => cts.Cancel()) + .ThrowsAsync(new OperationCanceledException()); + + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + try + { + await command.ExecuteAsync(cts.Token); + } + catch (OperationCanceledException) + { + // Expected + } + + // Assert - Temp files should be cleaned up + await Task.Delay(100); // Give cleanup time to complete + var tempFiles = Directory.GetFiles(_testDirectory, "*.s3tmp.*"); + Assert.AreEqual(0, tempFiles.Length); + } + + #endregion + + #region Integration Tests + + [TestMethod] + public async Task Integration_SmallFileDownload_CompletesSuccessfully() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "small-file.dat"); + var 
request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + + var fileSize = 512; // Small file + SetupSuccessfulSinglePartDownload(fileSize); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.IsTrue(File.Exists(destinationPath)); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyFileSize(destinationPath, fileSize)); + Assert.AreEqual(fileSize, response.Headers.ContentLength); + Assert.AreEqual($"bytes 0-{fileSize - 1}/{fileSize}", response.ContentRange); + + // Verify no temp files remain + var tempFiles = Directory.GetFiles(_testDirectory, "*.s3tmp.*"); + Assert.AreEqual(0, tempFiles.Length); + } + + [TestMethod] + public async Task Integration_LargeFileDownload_CompletesSuccessfully() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "large-file.dat"); + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + + var fileSize = 1024 * 1024; // 1MB file + SetupSuccessfulSinglePartDownload(fileSize); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.IsTrue(File.Exists(destinationPath)); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyFileSize(destinationPath, fileSize)); + Assert.AreEqual(fileSize, response.Headers.ContentLength); + } + + [TestMethod] + public async Task Integration_ZeroByteFile_HandlesCorrectly() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "empty-file.dat"); + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + + SetupSuccessfulSinglePartDownload(0); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.IsTrue(File.Exists(destinationPath)); + Assert.AreEqual(0, new FileInfo(destinationPath).Length); + } + + [TestMethod] + public async Task Integration_OverwriteExistingFile_SucceedsAndOverwrites() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "overwrite-test.dat"); + + // Create existing file with different content + var oldData = MultipartDownloadTestHelpers.GenerateTestData(512, 999); + File.WriteAllBytes(destinationPath, oldData); + + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + + var newFileSize = 1024; + SetupSuccessfulSinglePartDownload(newFileSize); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.IsTrue(File.Exists(destinationPath)); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyFileSize(destinationPath, newFileSize)); + + // Verify content was overwritten (not same as oldData) + var newData = File.ReadAllBytes(destinationPath); + Assert.AreNotEqual(oldData.Length, newData.Length); + } + + [TestMethod] + public async Task Integration_NestedDirectory_CreatesDirectoryAndDownloads() + { + // Arrange + var nestedPath = Path.Combine(_testDirectory, "level1", "level2", "level3", "nested-file.dat"); + var request = 
MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: nestedPath); + + var fileSize = 2048; + SetupSuccessfulSinglePartDownload(fileSize); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.IsTrue(File.Exists(nestedPath)); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyFileSize(nestedPath, fileSize)); + } + + #endregion + + #region Helper Methods + + private void SetupSuccessfulSinglePartDownload( + long fileSize, + string eTag = null, + ServerSideEncryptionMethod serverSideEncryptionMethod = null, + string serverSideEncryptionKeyManagementServiceKeyId = null, + ServerSideEncryptionCustomerMethod serverSideEncryptionCustomerMethod = null, + MetadataCollection metadata = null, + ChecksumType checksumType = null, + string checksumCRC32 = null, + string checksumSHA256 = null) + { + var data = MultipartDownloadTestHelpers.GenerateTestData((int)fileSize, 0); + + var response = new GetObjectResponse + { + ContentLength = fileSize, + ResponseStream = new MemoryStream(data), + // Real S3 behavior: ContentRange is NOT included for simple GET requests + // (single-part downloads without Range headers). + // ContentRange IS included when Range headers are used, even for single-part downloads. + // This mock simulates a simple GET without Range headers. + ContentRange = null, + ETag = eTag ?? "\"default-etag\"", + ServerSideEncryptionMethod = serverSideEncryptionMethod, + ServerSideEncryptionKeyManagementServiceKeyId = serverSideEncryptionKeyManagementServiceKeyId, + ServerSideEncryptionCustomerMethod = serverSideEncryptionCustomerMethod, + ChecksumType = checksumType, + ChecksumCRC32 = checksumCRC32, + ChecksumSHA256 = checksumSHA256 + }; + + // Add metadata items if provided (Metadata property is read-only) + if (metadata != null) + { + foreach (var key in metadata.Keys) + { + response.Metadata[key] = metadata[key]; + } + } + + _mockS3Client.Setup(c => c.GetObjectAsync( + It.IsAny<GetObjectRequest>(), + It.IsAny<CancellationToken>())) + .ReturnsAsync(response); + } + + #endregion + } +} diff --git a/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadTestHelpers.cs b/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadTestHelpers.cs index 49665433244b..8dd37d092de0 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadTestHelpers.cs +++ b/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadTestHelpers.cs @@ -592,5 +592,250 @@ internal static Mock CreateMockDataHandler() } #endregion + + #region FileDownloadConfiguration Creation + + /// <summary> + /// Creates a default FileDownloadConfiguration for testing. + /// </summary> + internal static FileDownloadConfiguration CreateFileDownloadConfiguration( + int concurrentRequests = DefaultConcurrentRequests, + int bufferSize = BufferSize, + long partSize = DefaultPartSize, + string destinationPath = null) + { + destinationPath = destinationPath ?? Path.Combine(Path.GetTempPath(), $"test-download-{Guid.NewGuid()}.dat"); + return new FileDownloadConfiguration( + concurrentRequests, + bufferSize, + partSize, + destinationPath); + } + + #endregion + + #region TransferUtilityDownloadRequest Creation + + /// <summary> + /// Creates a mock TransferUtilityDownloadRequest for testing. + /// </summary> + public static TransferUtilityDownloadRequest CreateDownloadRequest( + string bucketName = "test-bucket", + string key = "test-key", + string filePath = null, + long? partSize = null) + { + filePath = filePath ?? 
Path.Combine(Path.GetTempPath(), $"test-download-{Guid.NewGuid()}.dat"); + + var request = new TransferUtilityDownloadRequest + { + BucketName = bucketName, + Key = key, + FilePath = filePath + }; + + if (partSize.HasValue) + { + request.PartSize = partSize.Value; + } + + return request; + } + + #endregion + + #region Temporary File Management + + /// + /// Creates a temporary file path for testing. + /// Returns path in temp directory with unique name. + /// + public static string CreateTempFilePath(string fileName = null) + { + fileName = fileName ?? $"test-download-{Guid.NewGuid()}.dat"; + return Path.Combine(Path.GetTempPath(), fileName); + } + + /// + /// Cleans up temporary files used in tests. + /// Safe to call even if files don't exist. + /// + public static void CleanupTempFiles(params string[] filePaths) + { + foreach (var filePath in filePaths) + { + if (string.IsNullOrEmpty(filePath)) + continue; + + try + { + if (File.Exists(filePath)) + { + File.Delete(filePath); + } + } + catch + { + // Best effort cleanup - don't throw + } + } + } + + /// + /// Creates a temporary directory for test files. + /// + public static string CreateTempDirectory() + { + var tempDir = Path.Combine(Path.GetTempPath(), $"S3Tests_{Guid.NewGuid()}"); + Directory.CreateDirectory(tempDir); + return tempDir; + } + + /// + /// Cleans up a temporary directory and all its contents. + /// Safe to call even if directory doesn't exist. + /// + public static void CleanupTempDirectory(string directoryPath) + { + if (string.IsNullOrEmpty(directoryPath)) + return; + + try + { + if (Directory.Exists(directoryPath)) + { + Directory.Delete(directoryPath, recursive: true); + } + } + catch + { + // Best effort cleanup - don't throw + } + } + + #endregion + + #region File Verification + + /// + /// Verifies file contents match expected data. + /// + public static bool VerifyFileContents(string filePath, byte[] expectedData) + { + if (!File.Exists(filePath)) + return false; + + try + { + var actualData = File.ReadAllBytes(filePath); + return actualData.SequenceEqual(expectedData); + } + catch + { + return false; + } + } + + /// + /// Verifies file exists and has expected size. + /// + public static bool VerifyFileSize(string filePath, long expectedSize) + { + if (!File.Exists(filePath)) + return false; + + try + { + var fileInfo = new FileInfo(filePath); + return fileInfo.Length == expectedSize; + } + catch + { + return false; + } + } + + /// + /// Reads file contents for verification. + /// + public static byte[] ReadFileContents(string filePath) + { + if (!File.Exists(filePath)) + return null; + + try + { + return File.ReadAllBytes(filePath); + } + catch + { + return null; + } + } + + #endregion + + #region Multi-part File Writing Simulation + + /// + /// Simulates writing multiple parts to a file for testing. + /// Each part has predictable data based on part number and seed. + /// + public static void WritePartsToFile( + string filePath, + int totalParts, + long partSize, + int seed = 0) + { + using (var fileStream = new FileStream(filePath, FileMode.Create, FileAccess.Write, FileShare.None)) + { + for (int i = 0; i < totalParts; i++) + { + var partData = GenerateTestData((int)partSize, seed + i * (int)partSize); + fileStream.Write(partData, 0, partData.Length); + } + } + } + + /// + /// Verifies multi-part file contents match expected pattern. 
+ /// + public static bool VerifyMultipartFileContents( + string filePath, + int totalParts, + long partSize, + int seed = 0) + { + if (!File.Exists(filePath)) + return false; + + try + { + using (var fileStream = new FileStream(filePath, FileMode.Open, FileAccess.Read, FileShare.Read)) + { + for (int i = 0; i < totalParts; i++) + { + var expectedData = GenerateTestData((int)partSize, seed + i * (int)partSize); + var actualData = new byte[partSize]; + + var bytesRead = fileStream.Read(actualData, 0, (int)partSize); + if (bytesRead != partSize) + return false; + + if (!expectedData.SequenceEqual(actualData)) + return false; + } + + // Verify no extra data + return fileStream.Position == fileStream.Length; + } + } + catch + { + return false; + } + } + + #endregion } } diff --git a/sdk/test/Services/S3/UnitTests/Custom/OpenStreamWithResponseCommandTests.cs b/sdk/test/Services/S3/UnitTests/Custom/OpenStreamWithResponseCommandTests.cs index 907c5b52f8d6..ab98d371bc95 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/OpenStreamWithResponseCommandTests.cs +++ b/sdk/test/Services/S3/UnitTests/Custom/OpenStreamWithResponseCommandTests.cs @@ -281,6 +281,34 @@ public async Task ExecuteAsync_MultipartRangeStrategy_SetsCorrectContentLengthAn response.ResponseStream.Dispose(); } + [TestMethod] + public async Task ExecuteAsync_ZeroByteObject_ContentRangeIsNull() + { + // Arrange - Mock a 0-byte object + var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse( + objectSize: 0, + eTag: "empty-etag"); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = new TransferUtilityConfig(); + var command = new OpenStreamWithResponseCommand(mockClient.Object, request, config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert - S3 returns null ContentRange for 0-byte objects + Assert.AreEqual(0, response.Headers.ContentLength, + "ContentLength should be 0 for empty object"); + Assert.IsNull(response.ContentRange, + "ContentRange should be null for 0-byte objects (matching S3 behavior)"); + + // Cleanup + response.ResponseStream.Dispose(); + } + #endregion #region Integration Tests From 180f20f78537fe5da2c42298efac73f5a0fe3e55 Mon Sep 17 00:00:00 2001 From: Garrett Beatty Date: Fri, 28 Nov 2025 13:08:36 -0500 Subject: [PATCH 26/56] Make AddBuffer non async (#4173) --- .../Internal/BufferedPartDataHandler.cs | 2 +- .../Transfer/Internal/IPartBufferManager.cs | 4 +- .../Transfer/Internal/PartBufferManager.cs | 4 +- .../Custom/BufferedPartDataHandlerTests.cs | 37 +++++++--------- .../Custom/PartBufferManagerTests.cs | 44 +++++++++---------- 5 files changed, 41 insertions(+), 50 deletions(-) diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedPartDataHandler.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedPartDataHandler.cs index ae55c6c2422c..02fb974c7f72 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedPartDataHandler.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedPartDataHandler.cs @@ -82,7 +82,7 @@ public async Task ProcessPartAsync( partNumber, buffer.Length); // Add the buffered part to the buffer manager - await _partBufferManager.AddBufferAsync(buffer, cancellationToken).ConfigureAwait(false); + _partBufferManager.AddBuffer(buffer); Logger.DebugFormat("BufferedPartDataHandler: [Part {0}] Added to buffer manager", partNumber); diff --git 
a/sdk/src/Services/S3/Custom/Transfer/Internal/IPartBufferManager.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/IPartBufferManager.cs index 004c27092eae..9675c60b321e 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/IPartBufferManager.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/IPartBufferManager.cs @@ -48,9 +48,7 @@ internal interface IPartBufferManager : IDisposable /// Adds a downloaded part buffer and signals readers when next expected part arrives. /// /// The downloaded part buffer to add. - /// A token to cancel the operation. - /// A task that completes when the buffer has been added and signaling is complete. - Task AddBufferAsync(StreamPartBuffer buffer, CancellationToken cancellationToken); + void AddBuffer(StreamPartBuffer buffer); /// /// Reads data from the buffer manager. Automatically handles sequential part consumption diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/PartBufferManager.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/PartBufferManager.cs index 57d700363eb0..c679fcb91f9e 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/PartBufferManager.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/PartBufferManager.cs @@ -84,7 +84,7 @@ namespace Amazon.S3.Transfer.Internal /// - Example: With MaxInMemoryParts=10, if parts 5-14 are buffered, the task downloading /// part 15 blocks here until the reader consumes and releases part 5's buffer /// 2. Read part data from S3 into pooled buffer - /// 3. Add buffered part: await + /// 3. Add buffered part: /// - Adds buffer to _partDataSources dictionary /// - Signals _partAvailable to wake consumer if waiting /// 4. Consumer eventually releases the buffer slot after reading the part @@ -286,7 +286,7 @@ public void AddDataSource(IPartDataSource dataSource) } /// - public async Task AddBufferAsync(StreamPartBuffer buffer, CancellationToken cancellationToken) + public void AddBuffer(StreamPartBuffer buffer) { ThrowIfDisposed(); diff --git a/sdk/test/Services/S3/UnitTests/Custom/BufferedPartDataHandlerTests.cs b/sdk/test/Services/S3/UnitTests/Custom/BufferedPartDataHandlerTests.cs index 48c3e8369170..e7131cdc208d 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/BufferedPartDataHandlerTests.cs +++ b/sdk/test/Services/S3/UnitTests/Custom/BufferedPartDataHandlerTests.cs @@ -78,7 +78,7 @@ public async Task ProcessPartAsync_BuffersPartData() // Assert - should add buffer to manager mockBufferManager.Verify( - x => x.AddBufferAsync(It.IsAny(), It.IsAny()), + x => x.AddBuffer(It.IsAny()), Times.Once); } @@ -92,9 +92,8 @@ public async Task ProcessPartAsync_ReadsExactContentLength() StreamPartBuffer capturedBuffer = null; var mockBufferManager = new Mock(); - mockBufferManager.Setup(x => x.AddBufferAsync(It.IsAny(), It.IsAny())) - .Callback((buffer, ct) => capturedBuffer = buffer) - .Returns(Task.CompletedTask); + mockBufferManager.Setup(x => x.AddBuffer(It.IsAny())) + .Callback((buffer) => capturedBuffer = buffer); var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); var handler = new BufferedPartDataHandler(mockBufferManager.Object, config); @@ -124,9 +123,8 @@ public async Task ProcessPartAsync_HandlesSmallPart() StreamPartBuffer capturedBuffer = null; var mockBufferManager = new Mock(); - mockBufferManager.Setup(x => x.AddBufferAsync(It.IsAny(), It.IsAny())) - .Callback((buffer, ct) => capturedBuffer = buffer) - .Returns(Task.CompletedTask); + mockBufferManager.Setup(x => x.AddBuffer(It.IsAny())) + .Callback((buffer) => capturedBuffer = buffer); var config = 
MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); var handler = new BufferedPartDataHandler(mockBufferManager.Object, config); @@ -155,9 +153,8 @@ public async Task ProcessPartAsync_HandlesLargePart() StreamPartBuffer capturedBuffer = null; var mockBufferManager = new Mock(); - mockBufferManager.Setup(x => x.AddBufferAsync(It.IsAny(), It.IsAny())) - .Callback((buffer, ct) => capturedBuffer = buffer) - .Returns(Task.CompletedTask); + mockBufferManager.Setup(x => x.AddBuffer(It.IsAny())) + .Callback((buffer) => capturedBuffer = buffer); var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); var handler = new BufferedPartDataHandler(mockBufferManager.Object, config); @@ -191,9 +188,8 @@ public async Task ProcessPartAsync_PreservesDataIntegrity() StreamPartBuffer capturedBuffer = null; var mockBufferManager = new Mock(); - mockBufferManager.Setup(x => x.AddBufferAsync(It.IsAny(), It.IsAny())) - .Callback((buffer, ct) => capturedBuffer = buffer) - .Returns(Task.CompletedTask); + mockBufferManager.Setup(x => x.AddBuffer(It.IsAny())) + .Callback((buffer) => capturedBuffer = buffer); var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); var handler = new BufferedPartDataHandler(mockBufferManager.Object, config); @@ -234,7 +230,7 @@ public async Task ProcessPartAsync_HandlesZeroByteResponse() // Assert - should handle empty response gracefully mockBufferManager.Verify( - x => x.AddBufferAsync(It.IsAny(), It.IsAny()), + x => x.AddBuffer(It.IsAny()), Times.Once); } @@ -301,7 +297,7 @@ public async Task ProcessPartAsync_WithUnexpectedEOF_DoesNotBufferPartialData() // Assert - should NOT have added any buffer to manager since download failed mockBufferManager.Verify( - x => x.AddBufferAsync(It.IsAny(), It.IsAny()), + x => x.AddBuffer(It.IsAny()), Times.Never); } @@ -335,17 +331,14 @@ public async Task ProcessPartAsync_WithCancelledToken_ThrowsTaskCanceledExceptio } [TestMethod] - public async Task ProcessPartAsync_PassesCancellationTokenToBufferManager() + public async Task ProcessPartAsync_CallsAddBufferOnce() { // Arrange var partSize = 1024; var partData = new byte[partSize]; - CancellationToken capturedToken = default; var mockBufferManager = new Mock(); - mockBufferManager.Setup(x => x.AddBufferAsync(It.IsAny(), It.IsAny())) - .Callback((buffer, ct) => capturedToken = ct) - .Returns(Task.CompletedTask); + mockBufferManager.Setup(x => x.AddBuffer(It.IsAny())); var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); var handler = new BufferedPartDataHandler(mockBufferManager.Object, config); @@ -361,8 +354,8 @@ public async Task ProcessPartAsync_PassesCancellationTokenToBufferManager() // Act await handler.ProcessPartAsync(1, response, cts.Token); - // Assert - Assert.AreEqual(cts.Token, capturedToken); + // Assert - verify AddBuffer was called exactly once + mockBufferManager.Verify(x => x.AddBuffer(It.IsAny()), Times.Once); } #endregion diff --git a/sdk/test/Services/S3/UnitTests/Custom/PartBufferManagerTests.cs b/sdk/test/Services/S3/UnitTests/Custom/PartBufferManagerTests.cs index 72e3a11158c4..b07ddce455ac 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/PartBufferManagerTests.cs +++ b/sdk/test/Services/S3/UnitTests/Custom/PartBufferManagerTests.cs @@ -78,7 +78,7 @@ public async Task NextExpectedPartNumber_IncrementsAfterPartComplete() // Add part 1 byte[] testBuffer = ArrayPool.Shared.Rent(512); var partBuffer = new StreamPartBuffer(1, testBuffer, 512); - await manager.AddBufferAsync(partBuffer, 
CancellationToken.None); + manager.AddBuffer(partBuffer); // Read part 1 completely byte[] readBuffer = new byte[512]; @@ -134,7 +134,7 @@ public async Task WaitForBufferSpaceAsync_WhenMaxPartsReached_Blocks() await manager.WaitForBufferSpaceAsync(CancellationToken.None); byte[] testBuffer = ArrayPool.Shared.Rent(512); var partBuffer = new StreamPartBuffer(i, testBuffer, 512); - await manager.AddBufferAsync(partBuffer, CancellationToken.None); + manager.AddBuffer(partBuffer); } // Act - Try to wait for space (should block) @@ -169,7 +169,7 @@ public async Task WaitForBufferSpaceAsync_AfterRelease_AllowsAccess() await manager.WaitForBufferSpaceAsync(CancellationToken.None); byte[] testBuffer = ArrayPool.Shared.Rent(512); var partBuffer = new StreamPartBuffer(1, testBuffer, 512); - await manager.AddBufferAsync(partBuffer, CancellationToken.None); + manager.AddBuffer(partBuffer); // Release space manager.ReleaseBufferSpace(); @@ -226,10 +226,10 @@ public async Task WaitForBufferSpaceAsync_WithCancellation_ThrowsOperationCancel #endregion - #region AddBufferAsync Tests + #region AddBuffer Tests [TestMethod] - public async Task AddBufferAsync_CreatesBufferedDataSource() + public async Task AddBuffer_CreatesBufferedDataSource() { // Arrange var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); @@ -241,7 +241,7 @@ public async Task AddBufferAsync_CreatesBufferedDataSource() var partBuffer = new StreamPartBuffer(1, testBuffer, 512); // Act - await manager.AddBufferAsync(partBuffer, CancellationToken.None); + manager.AddBuffer(partBuffer); // Assert - Should be able to read from part 1 byte[] readBuffer = new byte[512]; @@ -256,7 +256,7 @@ public async Task AddBufferAsync_CreatesBufferedDataSource() [TestMethod] [ExpectedException(typeof(ArgumentNullException))] - public async Task AddBufferAsync_WithNullBuffer_ThrowsArgumentNullException() + public void AddBuffer_WithNullBuffer_ThrowsArgumentNullException() { // Arrange var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); @@ -265,7 +265,7 @@ public async Task AddBufferAsync_WithNullBuffer_ThrowsArgumentNullException() try { // Act - await manager.AddBufferAsync(null, CancellationToken.None); + manager.AddBuffer(null); // Assert - ExpectedException } @@ -276,7 +276,7 @@ public async Task AddBufferAsync_WithNullBuffer_ThrowsArgumentNullException() } [TestMethod] - public async Task AddBufferAsync_SignalsPartAvailable() + public async Task AddBuffer_SignalsPartAvailable() { // Arrange var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); @@ -297,7 +297,7 @@ public async Task AddBufferAsync_SignalsPartAvailable() // Add the part byte[] testBuffer = ArrayPool.Shared.Rent(512); var partBuffer = new StreamPartBuffer(1, testBuffer, 512); - await manager.AddBufferAsync(partBuffer, CancellationToken.None); + manager.AddBuffer(partBuffer); // Assert - Read should complete int bytesRead = await readTask; @@ -411,7 +411,7 @@ public async Task ReadAsync_ReadsDataSequentially() Buffer.BlockCopy(testData, 0, testBuffer, 0, 512); var partBuffer = new StreamPartBuffer(1, testBuffer, 512); - await manager.AddBufferAsync(partBuffer, CancellationToken.None); + manager.AddBuffer(partBuffer); // Act byte[] readBuffer = new byte[512]; @@ -439,7 +439,7 @@ public async Task ReadAsync_AdvancesNextExpectedPartNumber() // Add part 1 byte[] testBuffer = ArrayPool.Shared.Rent(512); var partBuffer = new StreamPartBuffer(1, testBuffer, 512); - await manager.AddBufferAsync(partBuffer, 
CancellationToken.None); + manager.AddBuffer(partBuffer); // Read part 1 completely byte[] readBuffer = new byte[512]; @@ -572,7 +572,7 @@ public async Task ReadAsync_WaitsForPartAvailability() // Add the part asynchronously byte[] testBuffer = ArrayPool.Shared.Rent(512); var partBuffer = new StreamPartBuffer(1, testBuffer, 512); - await manager.AddBufferAsync(partBuffer, CancellationToken.None); + manager.AddBuffer(partBuffer); // Assert - Read should complete int bytesRead = await readTask; @@ -653,14 +653,14 @@ public async Task ReadAsync_ReadingAcrossPartBoundary_FillsBuffer() byte[] testBuffer1 = ArrayPool.Shared.Rent(100); Buffer.BlockCopy(testData1, 0, testBuffer1, 0, 100); var partBuffer1 = new StreamPartBuffer(1, testBuffer1, 100); - await manager.AddBufferAsync(partBuffer1, CancellationToken.None); + manager.AddBuffer(partBuffer1); // Add Part 2 (100 bytes) byte[] testData2 = MultipartDownloadTestHelpers.GenerateTestData(100, 100); byte[] testBuffer2 = ArrayPool.Shared.Rent(100); Buffer.BlockCopy(testData2, 0, testBuffer2, 0, 100); var partBuffer2 = new StreamPartBuffer(2, testBuffer2, 100); - await manager.AddBufferAsync(partBuffer2, CancellationToken.None); + manager.AddBuffer(partBuffer2); // Act - Request 150 bytes (spans both parts) byte[] readBuffer = new byte[150]; @@ -700,7 +700,7 @@ public async Task ReadAsync_MultiplePartsInSingleRead_AdvancesCorrectly() byte[] testBuffer = ArrayPool.Shared.Rent(50); Buffer.BlockCopy(testData, 0, testBuffer, 0, 50); var partBuffer = new StreamPartBuffer(i, testBuffer, 50); - await manager.AddBufferAsync(partBuffer, CancellationToken.None); + manager.AddBuffer(partBuffer); } // Act - Read 150 bytes (all 3 parts) @@ -729,7 +729,7 @@ public async Task ReadAsync_PartCompletes_AdvancesToNextPart() // Add part 1 byte[] testBuffer1 = ArrayPool.Shared.Rent(100); var partBuffer1 = new StreamPartBuffer(1, testBuffer1, 100); - await manager.AddBufferAsync(partBuffer1, CancellationToken.None); + manager.AddBuffer(partBuffer1); // Read part 1 completely byte[] readBuffer = new byte[100]; @@ -741,7 +741,7 @@ public async Task ReadAsync_PartCompletes_AdvancesToNextPart() // Add part 2 byte[] testBuffer2 = ArrayPool.Shared.Rent(100); var partBuffer2 = new StreamPartBuffer(2, testBuffer2, 100); - await manager.AddBufferAsync(partBuffer2, CancellationToken.None); + manager.AddBuffer(partBuffer2); // Read part 2 int bytesRead = await manager.ReadAsync(readBuffer, 0, 100, CancellationToken.None); @@ -768,14 +768,14 @@ public async Task ReadAsync_EmptyPart_ContinuesToNextPart() // Add empty part 1 byte[] testBuffer1 = ArrayPool.Shared.Rent(100); var partBuffer1 = new StreamPartBuffer(1, testBuffer1, 0); // 0 bytes - await manager.AddBufferAsync(partBuffer1, CancellationToken.None); + manager.AddBuffer(partBuffer1); // Add part 2 with data byte[] testData2 = MultipartDownloadTestHelpers.GenerateTestData(100, 0); byte[] testBuffer2 = ArrayPool.Shared.Rent(100); Buffer.BlockCopy(testData2, 0, testBuffer2, 0, 100); var partBuffer2 = new StreamPartBuffer(2, testBuffer2, 100); - await manager.AddBufferAsync(partBuffer2, CancellationToken.None); + manager.AddBuffer(partBuffer2); // Act - Try to read 100 bytes starting from part 1 byte[] readBuffer = new byte[100]; @@ -945,7 +945,7 @@ public void Dispose_DisposesAllDataSources() byte[] testBuffer = ArrayPool.Shared.Rent(512); var partBuffer = new StreamPartBuffer(1, testBuffer, 512); - manager.AddBufferAsync(partBuffer, CancellationToken.None).Wait(); + manager.AddBuffer(partBuffer); // Act manager.Dispose(); @@ 
-963,7 +963,7 @@ public void Dispose_ClearsCollection() byte[] testBuffer = ArrayPool.Shared.Rent(512); var partBuffer = new StreamPartBuffer(1, testBuffer, 512); - manager.AddBufferAsync(partBuffer, CancellationToken.None).Wait(); + manager.AddBuffer(partBuffer); // Act manager.Dispose(); From 796cdcc39e47bc6cd64fc71ef671f4da5accb45d Mon Sep 17 00:00:00 2001 From: Philippe El Asmar <53088140+philasmar@users.noreply.github.com> Date: Fri, 28 Nov 2025 15:06:29 -0500 Subject: [PATCH 27/56] add failure policy to download directory (#4151) --- .gitignore | 4 +- .../c49077d9-90b3-437f-b316-6d8d8833ae76.json | 12 + .../Transfer/Internal/AbortOnFailurePolicy.cs | 72 ++++ .../Internal/ContinueOnFailurePolicy.cs | 104 ++++++ .../Internal/DownloadDirectoryCommand.cs | 14 +- .../Transfer/Internal/IFailurePolicy.cs | 62 ++++ .../Internal/_async/BaseCommand.async.cs | 6 +- .../DownloadDirectoryCommand.cs | 96 +++-- .../UploadDirectoryCommand.cs | 31 +- .../Custom/Transfer/Model/DirectoryResult.cs | 43 +++ .../S3/Custom/Transfer/Model/FailurePolicy.cs | 40 +++ ...TransferUtilityDownloadDirectoryRequest.cs | 112 +++++- ...ransferUtilityDownloadDirectoryResponse.cs | 21 +- .../S3/UnitTests/Custom/FailurePolicyTests.cs | 340 ++++++++++++++++++ 14 files changed, 906 insertions(+), 51 deletions(-) create mode 100644 generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae76.json create mode 100644 sdk/src/Services/S3/Custom/Transfer/Internal/AbortOnFailurePolicy.cs create mode 100644 sdk/src/Services/S3/Custom/Transfer/Internal/ContinueOnFailurePolicy.cs create mode 100644 sdk/src/Services/S3/Custom/Transfer/Internal/IFailurePolicy.cs create mode 100644 sdk/src/Services/S3/Custom/Transfer/Model/DirectoryResult.cs create mode 100644 sdk/src/Services/S3/Custom/Transfer/Model/FailurePolicy.cs create mode 100644 sdk/test/Services/S3/UnitTests/Custom/FailurePolicyTests.cs diff --git a/.gitignore b/.gitignore index 5042327fe021..7f81167eff89 100644 --- a/.gitignore +++ b/.gitignore @@ -69,4 +69,6 @@ sdk/test/Performance/**/BenchmarkDotNet.Artifacts/* #protocol-tests sdk/test/ProtocolTests/Generated/**/model sdk/test/ProtocolTests/Generated/**/sources -sdk/test/ProtocolTests/Generated/**/build-info \ No newline at end of file +sdk/test/ProtocolTests/Generated/**/build-info + +.DS_Store \ No newline at end of file diff --git a/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae76.json b/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae76.json new file mode 100644 index 000000000000..ac2ef799e36d --- /dev/null +++ b/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae76.json @@ -0,0 +1,12 @@ +{ + "services": [ + { + "serviceName": "S3", + "type": "minor", + "changeLogMessages": [ + "Add FailurePolicy property to TransferUtilityDownloadDirectoryRequest to allow configuration of failure handling behavior during directory downloads. The default behavior is set to abort on failure. Users can now choose to either abort the entire operation or continue downloading remaining files when a failure occurs.", + "Add ObjectDownloadFailedEvent event to TransferUtilityDownloadDirectory to notify users when an individual file download fails during a directory download operation. This event provides details about the failed download, including the original request, the specific file request and the exception encountered." 
+ ] + } + ] +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/AbortOnFailurePolicy.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/AbortOnFailurePolicy.cs new file mode 100644 index 000000000000..258a2cc57301 --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/AbortOnFailurePolicy.cs @@ -0,0 +1,72 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ + +using System; +using System.Threading; +using System.Threading.Tasks; + +namespace Amazon.S3.Transfer.Internal +{ + /// + /// Failure policy that cancels all related operations and rethrows the exception when + /// an action fails. + /// + /// + /// Use this policy when any single failure should abort the entire higher-level operation. + /// When an executed under this policy throws, the policy will cancel + /// the provided , invoke an optional failure callback, + /// and then rethrow the exception so the caller can observe the original failure. + /// + internal class AbortOnFailurePolicy : IFailurePolicy + { + /// + /// Executes the provided asynchronous under the abort-on-failure policy. + /// + /// An asynchronous delegate that performs the work to execute under the policy. + /// An optional callback that will be invoked with the exception if fails. + /// A that will be canceled by this policy to signal termination + /// of related work when a failure occurs. + /// + /// A that completes with true when completes successfully. + /// If fails, this method cancels , invokes + /// if provided, and rethrows the original exception; it does not return false. + /// + public async Task ExecuteAsync(Func action, Action onFailure, CancellationTokenSource cancellationTokenSource) + { + try + { + await action().ConfigureAwait(false); + + return true; + } + catch (Exception ex) + { + // Cancel all pending operations before propagating the exception + cancellationTokenSource?.Cancel(); + + onFailure?.Invoke(ex); + + throw; + } + } + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/ContinueOnFailurePolicy.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/ContinueOnFailurePolicy.cs new file mode 100644 index 000000000000..1d6b0cfe7f00 --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/ContinueOnFailurePolicy.cs @@ -0,0 +1,104 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
+ * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ + +using System; +using System.Collections.Concurrent; +using System.Threading; +using System.Threading.Tasks; + +namespace Amazon.S3.Transfer.Internal +{ + /// + /// Failure policy that records exceptions and allows other operations to continue. + /// + /// + /// Use this policy when individual operation failures should not abort the overall + /// download directory operation. Exceptions thrown by the action are captured and + /// stored in the supplied , and an optional + /// onFailure callback is invoked. For cancellation triggered by + /// the provided , cancellation is propagated + /// to callers by rethrowing the . + /// + internal class ContinueOnFailurePolicy : IFailurePolicy + { + private readonly ConcurrentBag _errors; + + /// + /// Initializes a new instance of the class. + /// + /// A used to collect exceptions + /// that occur while executing actions under this policy. Failures are added to this bag + /// so the caller can examine aggregated errors after the overall operation completes. + internal ContinueOnFailurePolicy(ConcurrentBag errors) + { + _errors = errors; + } + + /// + /// Executes and records failures without throwing them, + /// unless the failure is an operation cancellation triggered by the provided + /// . + /// + /// The asynchronous operation to execute under the policy. + /// A callback invoked with the exception when fails. + /// A used to determine and signal cancellation. + /// The policy will rethrow cancellations when the cancellation token was requested. + /// + /// A that completes with true when the action completed successfully. + /// If the action threw a non-cancellation exception, the exception is added to the internal error bag, + /// is invoked if provided, and the method completes with false to indicate + /// the action failed but the policy handled it and allowed processing to continue. + /// + public async Task ExecuteAsync(Func action, Action onFailure, CancellationTokenSource cancellationTokenSource) + { + try + { + await action().ConfigureAwait(false); + + return true; + } + // If the operation was canceled via the provided token, propagate cancellation. + catch (OperationCanceledException ex) when (cancellationTokenSource?.IsCancellationRequested == true) + { + onFailure?.Invoke(ex); + + // Collect the exception for later reporting. + _errors.Add(ex); + + throw; + } +// Disabled warning CA1031 to allow catching all exceptions to continue processing. +#pragma warning disable CA1031 + catch (Exception ex) +#pragma warning restore CA1031 + { + onFailure?.Invoke(ex); + + // Collect the exception for later reporting but don't throw it. + // This allows other downloads to continue processing. 
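+                // Returning false tells the caller this item failed so it can be counted,
+                // while the surrounding operation keeps processing the remaining items.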
+ _errors.Add(ex); + + return false; + } + } + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadDirectoryCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadDirectoryCommand.cs index 5058960d9a06..ab7fe961ad4a 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadDirectoryCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadDirectoryCommand.cs @@ -20,6 +20,7 @@ * */ using System; +using System.Collections.Concurrent; using System.Collections.Generic; using System.IO; using System.Text; @@ -30,11 +31,14 @@ using Amazon.S3.Util; using Amazon.Util.Internal; using Amazon.Runtime; +using Amazon.S3.Transfer.Model; namespace Amazon.S3.Transfer.Internal { internal partial class DownloadDirectoryCommand : BaseCommand { + private IFailurePolicy _failurePolicy; + private ConcurrentBag _errors = new ConcurrentBag(); private readonly IAmazonS3 _s3Client; private readonly TransferUtilityDownloadDirectoryRequest _request; private readonly bool _skipEncryptionInstructionFiles; @@ -52,6 +56,10 @@ internal DownloadDirectoryCommand(IAmazonS3 s3Client, TransferUtilityDownloadDir this._s3Client = s3Client; this._request = request; this._skipEncryptionInstructionFiles = s3Client is Amazon.S3.Internal.IAmazonS3Encryption; + _failurePolicy = + request.FailurePolicy == FailurePolicy.AbortOnFailure + ? new AbortOnFailurePolicy() + : new ContinueOnFailurePolicy(_errors); } private void downloadedProgressEventCallback(object sender, WriteObjectProgressArgs e) @@ -107,12 +115,6 @@ internal TransferUtilityDownloadRequest ConstructTransferUtilityDownloadRequest( downloadRequest.IfNoneMatch = this._request.IfNoneMatch; downloadRequest.ResponseHeaderOverrides = this._request.ResponseHeaderOverrides; - //Ensure the target file is a rooted within LocalDirectory. Otherwise error. - if(!InternalSDKUtils.IsFilePathRootedWithDirectoryPath(downloadRequest.FilePath, _request.LocalDirectory)) - { - throw new AmazonClientException($"The file {downloadRequest.FilePath} is not allowed outside of the target directory {_request.LocalDirectory}."); - } - downloadRequest.WriteObjectProgressEvent += downloadedProgressEventCallback; return downloadRequest; diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/IFailurePolicy.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/IFailurePolicy.cs new file mode 100644 index 000000000000..a32a6ef538b2 --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/IFailurePolicy.cs @@ -0,0 +1,62 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ + +using System; +using System.Threading; +using System.Threading.Tasks; + +namespace Amazon.S3.Transfer.Internal +{ + /// + /// Defines a policy for handling failures when executing asynchronous operations. + /// Implementations encapsulate cancellation behavior for + /// operations that may fail and need controlled continuation or termination. + /// + internal interface IFailurePolicy + { + /// + /// Executes an asynchronous under this failure policy. + /// + /// + /// Implementations of this interface control how failures that occur while running + /// are handled (for example, whether to abort the overall + /// operation, continue on failure, or aggregate errors). When + /// throws or faults, the policy implementation is responsible for invoking + /// with the thrown and for + /// taking any policy-specific cancellation action (for example by calling + /// .Cancel()). + /// + /// The returned completes with true when the + /// completed successfully according to the policy and + /// the caller may proceed. It completes with false when the action failed and + /// the policy handled the failure (the caller should treat this as a failed step). + /// + /// A function that performs the asynchronous work to execute under the policy. + /// A callback that will be invoked with the exception when fails. + /// A the policy may cancel to signal termination of related work. + /// + /// A that resolves to true if the action completed successfully + /// (no failure), or false if the action failed but the policy handled the failure. + /// + Task ExecuteAsync(Func action, Action onFailure, CancellationTokenSource cancellationTokenSource); + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/BaseCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/BaseCommand.async.cs index 83a828610bd2..a687917f7d9f 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/BaseCommand.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/BaseCommand.async.cs @@ -31,7 +31,7 @@ internal abstract partial class BaseCommand where TResponse : class /// public abstract Task ExecuteAsync(CancellationToken cancellationToken); - protected static async Task ExecuteCommandAsync(BaseCommand command, CancellationTokenSource internalCts, SemaphoreSlim throttler) where T : class + protected static async Task ExecuteCommandAsync(BaseCommand command, CancellationTokenSource internalCts) where T : class { try { @@ -48,10 +48,6 @@ await command.ExecuteAsync(internalCts.Token) } throw; } - finally - { - throttler.Release(); - } } } } diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs index c4edfa090502..cf99100e239e 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs @@ -15,19 +15,22 @@ using Amazon.S3.Model; using Amazon.S3.Util; +using Amazon.S3.Transfer.Model; using System; +using System.Collections.Concurrent; using System.Collections.Generic; using System.IO; using System.Linq; using System.Text; using System.Threading; using System.Threading.Tasks; +using Amazon.Runtime; 
+using Amazon.Util.Internal; namespace Amazon.S3.Transfer.Internal { internal partial class DownloadDirectoryCommand : BaseCommand { - TransferUtilityConfig _config; public bool DownloadFilesConcurrently { get; set; } @@ -82,41 +85,86 @@ public override async Task ExecuteAsyn await asyncThrottler.WaitAsync(cancellationToken) .ConfigureAwait(continueOnCapturedContext: false); - cancellationToken.ThrowIfCancellationRequested(); - if (internalCts.IsCancellationRequested) + try { - // Operation cancelled as one of the download requests failed with an exception, - // don't schedule any more download tasks. - // Don't throw an OperationCanceledException here as we want to process the - // responses and throw the original exception. - break; - } + cancellationToken.ThrowIfCancellationRequested(); + if (internalCts.IsCancellationRequested) + { + // Operation cancelled as one of the download requests failed with an exception, + // don't schedule any more download tasks. + // Don't throw an OperationCanceledException here as we want to process the + // responses and throw the original exception. + break; + } - // Valid for serial uploads when - // TransferUtilityDownloadDirectoryRequest.DownloadFilesConcurrently is set to false. - int prefixLength = listRequestPrefix.Length; + // Valid for serial uploads when + // TransferUtilityDownloadDirectoryRequest.DownloadFilesConcurrently is set to false. + int prefixLength = listRequestPrefix.Length; - // If DisableSlashCorrection is enabled (i.e. S3Directory is a key prefix) and it doesn't end with '/' then we need the parent directory to properly construct download path. - if (_request.DisableSlashCorrection && !listRequestPrefix.EndsWith("/")) - { - prefixLength = listRequestPrefix.LastIndexOf("/") + 1; - } + // If DisableSlashCorrection is enabled (i.e. S3Directory is a key prefix) and it doesn't end with '/' then we need the parent directory to properly construct download path. + if (_request.DisableSlashCorrection && !listRequestPrefix.EndsWith("/")) + { + prefixLength = listRequestPrefix.LastIndexOf("/") + 1; + } - this._currentFile = s3o.Key.Substring(prefixLength); + this._currentFile = s3o.Key.Substring(prefixLength); - var downloadRequest = ConstructTransferUtilityDownloadRequest(s3o, prefixLength); - var command = new DownloadCommand(this._s3Client, downloadRequest); + TransferUtilityDownloadRequest downloadRequest = ConstructTransferUtilityDownloadRequest(s3o, prefixLength); - var task = ExecuteCommandAsync(command, internalCts, asyncThrottler); - - pendingTasks.Add(task); + Action onFailure = (ex) => + { + this._request.OnRaiseObjectDownloadFailedEvent( + new ObjectDownloadFailedEventArgs( + this._request, + downloadRequest, + ex)); + }; + + var isValid = await _failurePolicy.ExecuteAsync( + () => { + //Ensure the target file is a rooted within LocalDirectory. Otherwise error. 
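+                        // Guards against object keys whose relative paths (for example, ones containing "..")
+                        // would resolve to a location outside the requested download directory.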
+ if(!InternalSDKUtils.IsFilePathRootedWithDirectoryPath(downloadRequest.FilePath, _request.LocalDirectory)) + { + throw new AmazonClientException($"The file {downloadRequest.FilePath} is not allowed outside of the target directory {_request.LocalDirectory}."); + } + + return Task.CompletedTask; + }, + onFailure, + internalCts + ).ConfigureAwait(false); + if (!isValid) continue; + + var task = _failurePolicy.ExecuteAsync( + async () => { + var command = new DownloadCommand(this._s3Client, downloadRequest); + await command.ExecuteAsync(internalCts.Token) + .ConfigureAwait(false); + }, + onFailure, + internalCts + ); + + pendingTasks.Add(task); + } + finally + { + asyncThrottler.Release(); + } } await TaskHelpers.WhenAllOrFirstExceptionAsync(pendingTasks, cancellationToken) .ConfigureAwait(continueOnCapturedContext: false); return new TransferUtilityDownloadDirectoryResponse { - ObjectsDownloaded = _numberOfFilesDownloaded + ObjectsDownloaded = _numberOfFilesDownloaded, + ObjectsFailed = _errors.Count, + Errors = _errors.ToList(), + Result = _errors.Count == 0 ? + DirectoryResult.Success : + (_numberOfFilesDownloaded > 0 ? + DirectoryResult.PartialSuccess : + DirectoryResult.Failure) }; } finally diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/UploadDirectoryCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/UploadDirectoryCommand.cs index b848988a3ca2..ff9f38f42149 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/UploadDirectoryCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/UploadDirectoryCommand.cs @@ -62,20 +62,27 @@ public override async Task ExecuteAsync( { await loopThrottler.WaitAsync(cancellationToken).ConfigureAwait(continueOnCapturedContext: false); - cancellationToken.ThrowIfCancellationRequested(); - if (internalCts.IsCancellationRequested) + try { - // Operation cancelled as one of the upload requests failed with an exception, - // don't schedule any more upload tasks. - // Don't throw an OperationCanceledException here as we want to process the - // responses and throw the original exception. - break; - } - var uploadRequest = ConstructRequest(basePath, filepath, prefix); - var uploadCommand = _utility.GetUploadCommand(uploadRequest, asyncThrottler); + cancellationToken.ThrowIfCancellationRequested(); + if (internalCts.IsCancellationRequested) + { + // Operation cancelled as one of the upload requests failed with an exception, + // don't schedule any more upload tasks. + // Don't throw an OperationCanceledException here as we want to process the + // responses and throw the original exception. 
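+                    // Breaking out lets WhenAllOrFirstExceptionAsync below drain the pending tasks
+                    // and rethrow the original failure.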
+ break; + } + var uploadRequest = ConstructRequest(basePath, filepath, prefix); + var uploadCommand = _utility.GetUploadCommand(uploadRequest, asyncThrottler); - var task = ExecuteCommandAsync(uploadCommand, internalCts, loopThrottler); - pendingTasks.Add(task); + var task = ExecuteCommandAsync(uploadCommand, internalCts); + pendingTasks.Add(task); + } + finally + { + loopThrottler.Release(); + } } await TaskHelpers.WhenAllOrFirstExceptionAsync(pendingTasks, cancellationToken) .ConfigureAwait(continueOnCapturedContext: false); diff --git a/sdk/src/Services/S3/Custom/Transfer/Model/DirectoryResult.cs b/sdk/src/Services/S3/Custom/Transfer/Model/DirectoryResult.cs new file mode 100644 index 000000000000..3f8cbd84fb2e --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Model/DirectoryResult.cs @@ -0,0 +1,43 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ + +namespace Amazon.S3.Transfer.Model +{ + /// + /// Overall outcome of a directory operation. + /// + public enum DirectoryResult + { + /// + /// All objects processed successfully. + /// + Success, + /// + /// Some objects succeeded and some failed. + /// + PartialSuccess, + /// + /// All attempted objects failed. + /// + Failure + } +} \ No newline at end of file diff --git a/sdk/src/Services/S3/Custom/Transfer/Model/FailurePolicy.cs b/sdk/src/Services/S3/Custom/Transfer/Model/FailurePolicy.cs new file mode 100644 index 000000000000..fbb265ca1103 --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Model/FailurePolicy.cs @@ -0,0 +1,40 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ + +namespace Amazon.S3.Transfer.Model +{ + /// + /// Specifies the policy to apply when a failure occurs during a directory transfer operation. + /// + public enum FailurePolicy + { + /// + /// Abort the directory transfer operation on failure. 
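+        /// The first failure cancels any remaining object transfers and the exception is rethrown to the caller.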
+ /// + AbortOnFailure, + + /// + /// Continue the directory transfer operation despite failures. + /// + ContinueOnFailure + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryRequest.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryRequest.cs index b0556e92487a..12dffa4b2b86 100644 --- a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryRequest.cs +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryRequest.cs @@ -29,6 +29,8 @@ using Amazon.Util; using Amazon.Runtime.Internal; using System.Globalization; +using System.Threading; +using Amazon.S3.Transfer.Model; namespace Amazon.S3.Transfer @@ -56,6 +58,44 @@ public class TransferUtilityDownloadDirectoryRequest private string ifMatch; private string ifNoneMatch; private ResponseHeaderOverrides responseHeaders; + private FailurePolicy failurePolicy = FailurePolicy.AbortOnFailure; + + /// + /// Gets or sets the failure policy for the download directory operation. + /// Determines whether the operation should abort or continue when a failure occurs during download. + /// The default value is . + /// + public FailurePolicy FailurePolicy + { + get { return this.failurePolicy; } + set { this.failurePolicy = value; } + } + + /// + /// Occurs when an individual object fails to download during a DownloadDirectory operation. + /// + /// + /// Subscribers will receive a instance containing + /// the original , the failed + /// , and the exception that caused the failure. + /// This event is raised on a background thread by the transfer utility. + /// + /// + /// request.ObjectDownloadFailedEvent += (sender, args) => + /// { + /// // inspect args.DirectoryRequest, args.ObjectRequest, args.Exception + /// }; + /// + public event EventHandler ObjectDownloadFailedEvent; + + /// + /// Internal helper used by the transfer implementation to raise the . + /// + /// The details of the failed object download. + internal void OnRaiseObjectDownloadFailedEvent(ObjectDownloadFailedEventArgs args) + { + ObjectDownloadFailedEvent?.Invoke(this, args); + } /// /// Gets or sets the name of the bucket. @@ -559,4 +599,74 @@ public override string ToString() this.TotalNumberOfFiles, this.NumberOfFilesDownloaded, this.TotalBytes, this.TransferredBytes); } } -} + + /// + /// Provides data for + /// which is raised when an individual object fails to download during a + /// DownloadDirectory operation. + /// + /// + /// Instances of this class are created by the transfer implementation and + /// passed to event subscribers. The instance contains the original directory + /// download request (), + /// the per-object download request that failed (), + /// and the exception that caused the failure. + /// + /// + /// + /// var request = new TransferUtilityDownloadDirectoryRequest { /* ... */ }; + /// request.ObjectDownloadFailedEvent += (sender, args) => + /// { + /// // args.DirectoryRequest: original directory request + /// // args.ObjectRequest: download request for the failed object + /// // args.Exception: exception thrown during the object download + /// Console.WriteLine($"Failed to download {args.ObjectRequest.Key}: {args.Exception}"); + /// }; + /// + /// + public class ObjectDownloadFailedEventArgs : EventArgs + { + /// + /// Initializes a new instance of the class. + /// + /// The original that initiated the directory download. + /// The representing the individual object download that failed. + /// The that caused the object download to fail. 
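+        /// The same exception instance is also collected into TransferUtilityDownloadDirectoryResponse.Errors
+        /// when FailurePolicy.ContinueOnFailure is in effect.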
+ internal ObjectDownloadFailedEventArgs( + TransferUtilityDownloadDirectoryRequest directoryRequest, + TransferUtilityDownloadRequest objectRequest, + Exception exception) + { + DirectoryRequest = directoryRequest; + ObjectRequest = objectRequest; + Exception = exception; + } + + /// + /// Gets the original that initiated the directory download. + /// + /// + /// The directory-level request that configured the overall DownloadDirectory operation + /// (bucket, prefix, local directory, options, etc.). + /// + public TransferUtilityDownloadDirectoryRequest DirectoryRequest { get; private set; } + + /// + /// Gets the for the individual object that failed to download. + /// + /// + /// Contains per-object parameters such as the S3 key, version id (if set), and the local file path. + /// + public TransferUtilityDownloadRequest ObjectRequest { get; private set; } + + /// + /// Gets the that caused the object download to fail. + /// + /// + /// The exception thrown by the underlying download operation. Can be an , + /// , , or other exception type depending + /// on the failure mode. + /// + public Exception Exception { get; private set; } + } +} \ No newline at end of file diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryResponse.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryResponse.cs index 098087e26143..1bed1f94ffb2 100644 --- a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryResponse.cs +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryResponse.cs @@ -13,7 +13,9 @@ * permissions and limitations under the License. */ -using Amazon.Runtime; +using System; +using System.Collections.Generic; +using Amazon.S3.Transfer.Model; namespace Amazon.S3.Transfer { @@ -23,8 +25,23 @@ namespace Amazon.S3.Transfer public class TransferUtilityDownloadDirectoryResponse { /// - /// The number of objects that have been downloaded + /// The number of objects that have been successfully downloaded. /// public long ObjectsDownloaded { get; set; } + + /// + /// The number of objects that failed to download. Zero if all succeeded. + /// + public long ObjectsFailed { get; set; } + + /// + /// The collection of exceptions encountered when downloading individual objects. + /// + public IList Errors { get; set; } + + /// + /// Overall result of the directory download operation. 
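+        /// Success maps to an empty Errors collection, PartialSuccess to a mix of downloaded and failed objects,
+        /// and Failure to no successful downloads. For example, assuming the DownloadDirectoryAsync overload
+        /// that returns this response:
+        ///
+        /// var response = await transferUtility.DownloadDirectoryAsync(request);
+        /// if (response.Result == DirectoryResult.PartialSuccess)
+        /// {
+        ///     // Inspect response.Errors to decide which objects to retry.
+        /// }
+        ///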
+ /// + public DirectoryResult Result { get; set; } } } diff --git a/sdk/test/Services/S3/UnitTests/Custom/FailurePolicyTests.cs b/sdk/test/Services/S3/UnitTests/Custom/FailurePolicyTests.cs new file mode 100644 index 000000000000..2be179501ae9 --- /dev/null +++ b/sdk/test/Services/S3/UnitTests/Custom/FailurePolicyTests.cs @@ -0,0 +1,340 @@ +using Amazon.S3; +using Amazon.S3.Model; +using Amazon.S3.Transfer; +using Amazon.S3.Transfer.Model; +using Amazon.S3.Transfer.Internal; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Moq; +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Text; +using System.Threading; +using System.Threading.Tasks; + +namespace AWSSDK.UnitTests +{ + [TestClass] + public class FailurePolicyTests + { + private static TransferUtilityDownloadDirectoryRequest CreateRequest(string localDir, FailurePolicy policy) + { + return new TransferUtilityDownloadDirectoryRequest + { + BucketName = "test-bucket", + S3Directory = "prefix/", + LocalDirectory = localDir, + FailurePolicy = policy, + DownloadFilesConcurrently = true + }; + } + + private static GetObjectResponse SuccessObject(string bucket, string key, string content = "data") + { + return new GetObjectResponse + { + BucketName = bucket, + Key = key, + ResponseStream = new MemoryStream(Encoding.UTF8.GetBytes(content)), + ContentLength = content.Length + }; + } + + private static Mock CreateMockS3(IEnumerable keys, Func shouldFail) + { + var mock = new Mock(); + + mock.Setup(m => m.Config).Returns(new AmazonS3Config()); + // ListObjectsAsync returns all objects in one page + mock.Setup(m => m.ListObjectsAsync(It.Is(r => r.BucketName == "test-bucket"), It.IsAny())) + .ReturnsAsync(new ListObjectsResponse + { + S3Objects = keys.Select(k => new S3Object { Key = k, Size = 4 }).ToList() + }); + + foreach (var key in keys) + { + if (shouldFail(key)) + { + mock.Setup(m => m.GetObjectAsync(It.Is(r => r.Key == key && r.BucketName == "test-bucket"), It.IsAny())) + .ThrowsAsync(new AmazonS3Exception("Simulated failure for " + key)); + } + else + { + mock.Setup(m => m.GetObjectAsync(It.Is(r => r.Key == key && r.BucketName == "test-bucket"), It.IsAny())) + .ReturnsAsync(SuccessObject("test-bucket", key)); + } + } + + mock.Setup(m => m.Dispose()); + return mock; + } + + private static string CreateTempDirectory() + { + string dir = Path.Combine(Path.GetTempPath(), "FailurePolicyTests", Guid.NewGuid().ToString()); + Directory.CreateDirectory(dir); + return dir; + } + + [TestMethod] + [TestCategory("S3")] + public async Task DownloadDirectory_ContinueOnFailure_PartialSuccess() + { + var keys = new[] { "prefix/file1.txt", "prefix/file2.txt", "prefix/file3.txt" }; + var mockS3 = CreateMockS3(keys, k => k.EndsWith("file2.txt", StringComparison.Ordinal)); + string localDir = CreateTempDirectory(); + try + { + var cancellationToken = new CancellationToken(); + var config = new TransferUtilityConfig(); + var tu = new TransferUtility(mockS3.Object); + var request = CreateRequest(localDir, FailurePolicy.ContinueOnFailure); + var command = new DownloadDirectoryCommand(mockS3.Object, request, config); + command.DownloadFilesConcurrently = request.DownloadFilesConcurrently; + var response = await command.ExecuteAsync(cancellationToken).ConfigureAwait(false); + + Assert.IsNotNull(response); + Assert.AreEqual(2, response.ObjectsDownloaded); + Assert.AreEqual(1, response.ObjectsFailed); + Assert.AreEqual(DirectoryResult.PartialSuccess, response.Result); + Assert.IsNotNull(response.Errors); + 
Assert.AreEqual(1, response.Errors.Count); + Assert.IsTrue(File.Exists(Path.Combine(localDir, "file1.txt"))); + Assert.IsTrue(File.Exists(Path.Combine(localDir, "file3.txt"))); + Assert.IsFalse(File.Exists(Path.Combine(localDir, "file2.txt"))); + } + finally + { + try { Directory.Delete(localDir, true); } catch { } + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task DownloadDirectory_ContinueOnFailure_AllFailures() + { + var keys = new[] { "prefix/fileA.txt", "prefix/fileB.txt" }; + var mockS3 = CreateMockS3(keys, k => true); + string localDir = CreateTempDirectory(); + try + { + var cancellationToken = new CancellationToken(); + var config = new TransferUtilityConfig(); + var tu = new TransferUtility(mockS3.Object); + var request = CreateRequest(localDir, FailurePolicy.ContinueOnFailure); + var command = new DownloadDirectoryCommand(mockS3.Object, request, config); + command.DownloadFilesConcurrently = request.DownloadFilesConcurrently; + var response = await command.ExecuteAsync(cancellationToken).ConfigureAwait(false); + + Assert.IsNotNull(response); + Assert.AreEqual(0, response.ObjectsDownloaded); + Assert.AreEqual(2, response.ObjectsFailed); + Assert.AreEqual(DirectoryResult.Failure, response.Result); + Assert.IsNotNull(response.Errors); + Assert.AreEqual(2, response.Errors.Count); + Assert.AreEqual(0, Directory.GetFiles(localDir).Length); + } + finally + { + try { Directory.Delete(localDir, true); } catch { } + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task DownloadDirectory_ContinueOnFailure_AllSuccess() + { + var keys = new[] { "prefix/ok1.txt", "prefix/ok2.txt" }; + var mockS3 = CreateMockS3(keys, k => false); + string localDir = CreateTempDirectory(); + try + { + var cancellationToken = new CancellationToken(); + var config = new TransferUtilityConfig(); + var tu = new TransferUtility(mockS3.Object); + var request = CreateRequest(localDir, FailurePolicy.ContinueOnFailure); + var command = new DownloadDirectoryCommand(mockS3.Object, request, config); + command.DownloadFilesConcurrently = request.DownloadFilesConcurrently; + var response = await command.ExecuteAsync(cancellationToken).ConfigureAwait(false); + + Assert.IsNotNull(response); + Assert.AreEqual(2, response.ObjectsDownloaded); + Assert.AreEqual(0, response.ObjectsFailed); + Assert.AreEqual(DirectoryResult.Success, response.Result); + Assert.IsTrue(File.Exists(Path.Combine(localDir, "ok1.txt"))); + Assert.IsTrue(File.Exists(Path.Combine(localDir, "ok2.txt"))); + } + finally + { + try { Directory.Delete(localDir, true); } catch { } + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task DownloadDirectory_AbortOnFailure_ThrowsOnFirstFailure() + { + var keys = new[] { "prefix/first.txt", "prefix/second.txt" }; + var mockS3 = CreateMockS3(keys, k => k.EndsWith("second.txt", StringComparison.Ordinal)); + string localDir = CreateTempDirectory(); + try + { + var tu = new TransferUtility(mockS3.Object); + var request = CreateRequest(localDir, FailurePolicy.AbortOnFailure); + + var ex = await Assert.ThrowsExceptionAsync(() => tu.DownloadDirectoryAsync(request)); + Assert.IsTrue(ex.Message.Contains("second.txt")); + // first file may or may not have downloaded depending on timing; ensure at least one file attempt occurred + Assert.IsTrue(Directory.GetFiles(localDir).Length <= 1); + } + finally + { + try { Directory.Delete(localDir, true); } catch { } + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task 
DownloadDirectory_ObjectDownloadFailedEvent_CancelInHandler_ContinueOnFailure_Throws() + { + var keys = new[] { "prefix/file1.txt", "prefix/file2.txt", "prefix/file3.txt" }; + var mockS3 = CreateMockS3(keys, k => k.EndsWith("file2.txt", StringComparison.Ordinal)); + string localDir = CreateTempDirectory(); + try + { + var tu = new TransferUtility(mockS3.Object); + var request = CreateRequest(localDir, FailurePolicy.ContinueOnFailure); + // Make sequential to make behavior deterministic for the test. + request.DownloadFilesConcurrently = false; + + bool handlerInvoked = false; + request.ObjectDownloadFailedEvent += (sender, args) => + { + handlerInvoked = true; + throw new AmazonS3Exception("Stop processing immediately"); + }; + + var ex = await Assert.ThrowsExceptionAsync(() => tu.DownloadDirectoryAsync(request)); + Assert.IsTrue(ex.Message.Equals("Stop processing immediately")); + + Assert.IsTrue(handlerInvoked, "ObjectDownloadFailedEvent handler was not invoked."); + } + finally + { + try { Directory.Delete(localDir, true); } catch { } + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task DownloadDirectory_ObjectDownloadFailedEvent_CancelInHandler_AbortOnFailure_Throws() + { + var keys = new[] { "prefix/first.txt", "prefix/second.txt", "prefix/third.txt" }; + var mockS3 = CreateMockS3(keys, k => k.EndsWith("second.txt", StringComparison.Ordinal)); + string localDir = CreateTempDirectory(); + try + { + var tu = new TransferUtility(mockS3.Object); + var request = CreateRequest(localDir, FailurePolicy.AbortOnFailure); + request.DownloadFilesConcurrently = false; + + request.ObjectDownloadFailedEvent += (sender, args) => + { + throw new AmazonS3Exception("Stop processing immediately"); + }; + + var ex = await Assert.ThrowsExceptionAsync(() => tu.DownloadDirectoryAsync(request)); + Assert.IsTrue(ex.Message.Equals("Stop processing immediately")); + } + finally + { + try { Directory.Delete(localDir, true); } catch { } + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task DownloadDirectory_ObjectDownloadFailedEvent_ArgsContainExpectedData_ContinueOnFailure() + { + var keys = new[] { "prefix/a.txt", "prefix/b.txt" }; + var mockS3 = CreateMockS3(keys, k => k.EndsWith("b.txt", StringComparison.Ordinal)); + string localDir = CreateTempDirectory(); + try + { + var config = new TransferUtilityConfig(); + var request = CreateRequest(localDir, FailurePolicy.ContinueOnFailure); + // collect events + var captured = new List(); + var invoked = new ManualResetEventSlim(false); + request.ObjectDownloadFailedEvent += (sender, args) => + { + captured.Add(args); + invoked.Set(); + }; + + var command = new DownloadDirectoryCommand(mockS3.Object, request, config); + command.DownloadFilesConcurrently = request.DownloadFilesConcurrently; + var response = await command.ExecuteAsync(CancellationToken.None).ConfigureAwait(false); + + // wait briefly for any background event dispatch + invoked.Wait(1000); + + Assert.IsNotNull(response); + Assert.AreEqual(1, response.ObjectsFailed); + Assert.AreEqual(1, captured.Count); + + var evt = captured[0]; + Assert.AreSame(request, evt.DirectoryRequest); + Assert.IsNotNull(evt.ObjectRequest); + Assert.IsTrue(evt.ObjectRequest.Key.EndsWith("b.txt", StringComparison.Ordinal)); + Assert.IsNotNull(evt.Exception); + Assert.IsTrue(evt.Exception.Message.Contains("Simulated failure for")); + } + finally + { + try { Directory.Delete(localDir, true); } catch { } + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task 
DownloadDirectory_ObjectDownloadFailedEvent_ArgsContainExpectedData_AbortOnFailure() + { + var keys = new[] { "prefix/x.txt", "prefix/y.txt" }; + var mockS3 = CreateMockS3(keys, k => k.EndsWith("y.txt", StringComparison.Ordinal)); + string localDir = CreateTempDirectory(); + try + { + var request = CreateRequest(localDir, FailurePolicy.AbortOnFailure); + var captured = new List(); + var invoked = new ManualResetEventSlim(false); + + request.ObjectDownloadFailedEvent += (sender, args) => + { + captured.Add(args); + invoked.Set(); + }; + + var tu = new TransferUtility(mockS3.Object); + await Assert.ThrowsExceptionAsync(() => tu.DownloadDirectoryAsync(request)); + + // wait for event + invoked.Wait(1000); + + Assert.AreEqual(1, captured.Count); + var evt = captured[0]; + Assert.AreSame(request, evt.DirectoryRequest); + Assert.IsNotNull(evt.ObjectRequest); + Assert.IsTrue(evt.ObjectRequest.Key.EndsWith("y.txt", StringComparison.Ordinal)); + Assert.IsNotNull(evt.Exception); + Assert.IsTrue(evt.Exception.Message.Contains("Simulated failure for")); + } + finally + { + try { Directory.Delete(localDir, true); } catch { } + } + } + } +} From 22e871610fb4daa6ef331dc27a54ec18d1f15c04 Mon Sep 17 00:00:00 2001 From: Garrett Beatty Date: Fri, 28 Nov 2025 16:22:33 -0500 Subject: [PATCH 28/56] fix test (#4175) --- .../Services/S3/UnitTests/Custom/FilePartDataHandlerTests.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/test/Services/S3/UnitTests/Custom/FilePartDataHandlerTests.cs b/sdk/test/Services/S3/UnitTests/Custom/FilePartDataHandlerTests.cs index e10005e8764f..37bf03a2c179 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/FilePartDataHandlerTests.cs +++ b/sdk/test/Services/S3/UnitTests/Custom/FilePartDataHandlerTests.cs @@ -827,7 +827,7 @@ public void Dispose_CanBeCalledMultipleTimes() } [TestMethod] - public async Task Dispose_WithoutPrepare_DoesNotThrow() + public void Dispose_WithoutPrepare_DoesNotThrow() { // Arrange var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( From 18ad664a51f18f7934142a0abc9755e3da90f592 Mon Sep 17 00:00:00 2001 From: Garrett Beatty Date: Mon, 1 Dec 2025 14:50:03 -0500 Subject: [PATCH 29/56] Add progress tracking for multi part download to files (#4139) --- .../984a2bde-687f-4ed1-b6eb-03f15b257416.json | 11 + .../Internal/BufferedMultipartStream.cs | 2 +- .../Transfer/Internal/IDownloadManager.cs | 3 +- .../Internal/MultipartDownloadCommand.cs | 87 ++++++ .../Internal/MultipartDownloadManager.cs | 91 ++++++- .../_async/MultipartDownloadCommand.async.cs | 18 +- .../IntegrationTests/TransferUtilityTests.cs | 155 +++++++++++ .../Custom/BufferedMultipartStreamTests.cs | 12 +- .../Custom/MultipartDownloadManagerTests.cs | 257 +++++++++++++++++- 9 files changed, 608 insertions(+), 28 deletions(-) create mode 100644 generator/.DevConfigs/984a2bde-687f-4ed1-b6eb-03f15b257416.json diff --git a/generator/.DevConfigs/984a2bde-687f-4ed1-b6eb-03f15b257416.json b/generator/.DevConfigs/984a2bde-687f-4ed1-b6eb-03f15b257416.json new file mode 100644 index 000000000000..a1fb17cb3107 --- /dev/null +++ b/generator/.DevConfigs/984a2bde-687f-4ed1-b6eb-03f15b257416.json @@ -0,0 +1,11 @@ +{ + "services": [ + { + "serviceName": "S3", + "type": "patch", + "changeLogMessages": [ + "Added progress tracking events to multipart download operations" + ] + } + ] +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedMultipartStream.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedMultipartStream.cs index 0534d3c100a4..f5085a197eea 
100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedMultipartStream.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedMultipartStream.cs @@ -123,7 +123,7 @@ public async Task InitializeAsync(CancellationToken cancellationToken) _discoveryResult.TotalParts, _discoveryResult.IsSinglePart); - await _downloadCoordinator.StartDownloadsAsync(_discoveryResult, cancellationToken) + await _downloadCoordinator.StartDownloadsAsync(_discoveryResult, null, cancellationToken) .ConfigureAwait(false); _initialized = true; diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/IDownloadManager.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/IDownloadManager.cs index 7bf997608cfc..86f7240988a1 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/IDownloadManager.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/IDownloadManager.cs @@ -45,9 +45,10 @@ internal interface IDownloadManager : IDisposable /// Starts concurrent downloads with HTTP concurrency control and part range calculations. /// /// Results from the discovery phase. + /// Optional callback for progress tracking events. /// A token to cancel the download operation. /// A task that completes when all downloads finish or an error occurs. - Task StartDownloadsAsync(DownloadDiscoveryResult discoveryResult, CancellationToken cancellationToken); + Task StartDownloadsAsync(DownloadDiscoveryResult discoveryResult, EventHandler progressCallback, CancellationToken cancellationToken); /// /// Exception that occurred during downloads, if any. diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadCommand.cs index 820900621e80..0ce97801bd81 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadCommand.cs @@ -36,6 +36,9 @@ internal partial class MultipartDownloadCommand : BaseCommand + /// Fires the DownloadInitiatedEvent to notify subscribers that the download has started. + /// This event is fired exactly once at the beginning of the download operation. + /// + private void FireTransferInitiatedEvent() + { + var transferInitiatedEventArgs = new DownloadInitiatedEventArgs(_request, _request.FilePath); + _request.OnRaiseTransferInitiatedEvent(transferInitiatedEventArgs); + } + + /// + /// Fires the DownloadCompletedEvent to notify subscribers that the download completed successfully. + /// This event is fired exactly once when all parts have been downloaded and assembled. + /// Downloads are complete, so transferred bytes equals total bytes. + /// + /// The unified TransferUtilityDownloadResponse containing S3 metadata + /// The total number of bytes in the file + private void FireTransferCompletedEvent(TransferUtilityDownloadResponse response, long totalBytes) + { + var transferCompletedEventArgs = new DownloadCompletedEventArgs( + _request, + response, + _request.FilePath, + totalBytes, + totalBytes); + _request.OnRaiseTransferCompletedEvent(transferCompletedEventArgs); + } + + /// + /// Fires the DownloadFailedEvent to notify subscribers that the download failed. + /// This event is fired exactly once when an error occurs during the download. + /// Uses the last known transferred bytes from progress tracking. 
+ ///
+ /// Total file size if known, otherwise -1
+ private void FireTransferFailedEvent(long totalBytes = -1)
+ {
+     var eventArgs = new DownloadFailedEventArgs(
+         _request,
+         _request.FilePath,
+         System.Threading.Interlocked.Read(ref _lastKnownTransferredBytes),
+         totalBytes);
+     _request.OnRaiseTransferFailedEvent(eventArgs);
+ }
+
+ #endregion
+
+ #region Progress Tracking
+
+ ///
+ /// Callback for part download progress.
+ /// Forwards the aggregated progress events from the coordinator to the user's progress callback.
+ /// The coordinator has already aggregated progress across all concurrent part downloads.
+ /// Tracks the last known transferred bytes for failure reporting.
+ ///
+ /// The event sender (coordinator)
+ /// Aggregated progress information from the coordinator
+ internal void DownloadPartProgressEventCallback(object sender, WriteObjectProgressArgs e)
+ {
+     // Track last known transferred bytes using Exchange (not Add).
+     //
+     // Why Exchange? The coordinator already aggregates increments from concurrent parts:
+     //   Coordinator receives:     Part 1: +512 bytes, Part 2: +1024 bytes, Part 3: +768 bytes
+     //   Coordinator aggregates:   0 -> 512 -> 1536 -> 2304 (using Interlocked.Add)
+     //   Coordinator passes to us: e.TransferredBytes = 2304 (pre-aggregated total)
+     //
+     // We receive the TOTAL (e.TransferredBytes = 2304), not an increment (+768).
+     // Using Add here would incorrectly accumulate totals: 0 + 2304 + 2304 + ... = wrong!
+     // Using Exchange correctly stores the latest total: 2304 (overwrite previous value).
+     //
+     // Compare to other commands (SimpleUploadCommand, DownloadCommand) which receive
+     // INCREMENTS directly from SDK streams and must use Add to accumulate them.
+     System.Threading.Interlocked.Exchange(ref _lastKnownTransferredBytes, e.TransferredBytes);
+
+     // Set the Request property to enable access to the original download request
+     e.Request = _request;
+
+     // Forward the coordinator's aggregated progress event to the user
+     _request.OnRaiseProgressEvent(e);
+ }
+
+ #endregion
 }
}
diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs
index e38bf58035ce..0ad53f273ffd 100644
--- a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs
+++ b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs
@@ -54,6 +54,17 @@ internal class MultipartDownloadManager : IDownloadManager
     private string _savedETag;
     private int _discoveredPartCount;

+    // Progress tracking fields for multipart download aggregation
+    private long _totalTransferredBytes = 0;
+    private long _totalObjectSize = 0;
+    private EventHandler<WriteObjectProgressArgs> _userProgressCallback;
+
+    // Atomic flag to ensure the completion event fires exactly once.
+    // Without this, concurrent parts completing simultaneously can both see
+    // transferredBytes >= _totalObjectSize and fire duplicate completion events.
+    // Uses int instead of bool because Interlocked.CompareExchange has no overload for bool.
+    private int _completionEventFired = 0; // 0 = false, 1 = true
+
     private Logger Logger
     {
         get { return Logger.GetLogger(typeof(TransferUtility)); }
@@ -133,13 +144,17 @@
     }

     ///
-    public async Task StartDownloadsAsync(DownloadDiscoveryResult discoveryResult, CancellationToken cancellationToken)
+    public async Task StartDownloadsAsync(DownloadDiscoveryResult discoveryResult, EventHandler<WriteObjectProgressArgs> progressCallback, CancellationToken cancellationToken)
     {
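+        // Contract note for callers (e.g., MultipartDownloadCommand): the callback passed here
+        // receives totals that this manager has ALREADY aggregated across all concurrent part
+        // downloads, so a subscriber observes a monotonically increasing e.TransferredBytes
+        // sequence (for three 8 MB parts: ... -> 8 MB -> 16 MB -> 24 MB, interleaved with
+        // sub-part updates), never per-part increments. Consumers should therefore store the
+        // value with Exchange, not accumulate it with Add.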
ThrowIfDisposed(); if (discoveryResult == null) throw new ArgumentNullException(nameof(discoveryResult)); + // Store for progress aggregation + _userProgressCallback = progressCallback; + _totalObjectSize = discoveryResult.ObjectSize; + Logger.DebugFormat("MultipartDownloadManager: Starting downloads - TotalParts={0}, IsSinglePart={1}", discoveryResult.TotalParts, discoveryResult.IsSinglePart); @@ -151,10 +166,27 @@ public async Task StartDownloadsAsync(DownloadDiscoveryResult discoveryResult, C // Prepare the data handler (e.g., create temp files for file-based downloads) await _dataHandler.PrepareAsync(discoveryResult, cancellationToken).ConfigureAwait(false); + // Create delegate once and reuse for all parts + var wrappedCallback = progressCallback != null + ? new EventHandler(DownloadPartProgressEventCallback) + : null; + + // Attach progress callback to Part 1's response if provided + if (wrappedCallback != null) + { + discoveryResult.InitialResponse.WriteObjectProgressEvent += wrappedCallback; + } + // Process Part 1 from InitialResponse (applies to both single-part and multipart) Logger.DebugFormat("MultipartDownloadManager: Buffering Part 1 from discovery response"); await _dataHandler.ProcessPartAsync(1, discoveryResult.InitialResponse, cancellationToken).ConfigureAwait(false); + // Detach the event handler after processing to prevent memory leak + if (wrappedCallback != null) + { + discoveryResult.InitialResponse.WriteObjectProgressEvent -= wrappedCallback; + } + if (discoveryResult.IsSinglePart) { // Single-part: Part 1 is the entire object @@ -169,7 +201,7 @@ public async Task StartDownloadsAsync(DownloadDiscoveryResult discoveryResult, C for (int partNum = 2; partNum <= discoveryResult.TotalParts; partNum++) { - var task = CreateDownloadTaskAsync(partNum, discoveryResult.ObjectSize, internalCts.Token); + var task = CreateDownloadTaskAsync(partNum, discoveryResult.ObjectSize, wrappedCallback, internalCts.Token); downloadTasks.Add(task); } @@ -245,7 +277,7 @@ public async Task StartDownloadsAsync(DownloadDiscoveryResult discoveryResult, C - private async Task CreateDownloadTaskAsync(int partNumber, long objectSize, CancellationToken cancellationToken) + private async Task CreateDownloadTaskAsync(int partNumber, long objectSize, EventHandler progressCallback, CancellationToken cancellationToken) { Logger.DebugFormat("MultipartDownloadManager: [Part {0}] Waiting for buffer space", partNumber); @@ -301,6 +333,12 @@ private async Task CreateDownloadTaskAsync(int partNumber, long objectSize, Canc } response = await _s3Client.GetObjectAsync(getObjectRequest, cancellationToken).ConfigureAwait(false); + + // Attach progress callback to response if provided + if (progressCallback != null) + { + response.WriteObjectProgressEvent += progressCallback; + } Logger.DebugFormat("MultipartDownloadManager: [Part {0}] GetObject response received - ContentLength={1}", partNumber, response.ContentLength); @@ -553,6 +591,53 @@ internal void ValidateContentRange(GetObjectResponse response, int partNumber, l } } + /// + /// Creates progress args with aggregated values for multipart downloads. 
+ /// + private WriteObjectProgressArgs CreateProgressArgs(long incrementTransferred, long transferredBytes, bool completed = false) + { + string filePath = (_request as TransferUtilityDownloadRequest)?.FilePath; + + return new WriteObjectProgressArgs( + _request.BucketName, + _request.Key, + filePath, + _request.VersionId, + incrementTransferred, + transferredBytes, + _totalObjectSize, + completed + ); + } + + /// + /// Progress aggregation callback that combines progress across all concurrent part downloads. + /// Uses thread-safe counter increment to handle concurrent updates. + /// Detects completion naturally when transferred bytes reaches total size. + /// Uses atomic flag to ensure completion event fires exactly once. + /// + private void DownloadPartProgressEventCallback(object sender, WriteObjectProgressArgs e) + { + long transferredBytes = Interlocked.Add(ref _totalTransferredBytes, e.IncrementTransferred); + + // Use atomic CompareExchange to ensure only first thread fires completion + bool isComplete = false; + if (transferredBytes >= _totalObjectSize) + { + // CompareExchange returns the original value before the exchange + // If original value was 0 (false), we're the first thread and should fire completion + int originalValue = Interlocked.CompareExchange(ref _completionEventFired, 1, 0); + if (originalValue == 0) // Was false, now set to true + { + isComplete = true; + } + } + + // Create and fire aggregated progress event + var aggregatedArgs = CreateProgressArgs(e.IncrementTransferred, transferredBytes, isComplete); + _userProgressCallback?.Invoke(this, aggregatedArgs); + } + private void ThrowIfDisposed() { if (_disposed) diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartDownloadCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartDownloadCommand.async.cs index 23b423410bca..2752e801de3a 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartDownloadCommand.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartDownloadCommand.async.cs @@ -33,6 +33,9 @@ public override async Task ExecuteAsync(Cancell { // Validate request parameters ValidateRequest(); + + // Fire initiated event before starting any network operations + FireTransferInitiatedEvent(); // Create configuration from request settings var config = CreateConfiguration(); @@ -54,6 +57,7 @@ public override async Task ExecuteAsync(Cancell dataHandler, RequestEventHandler)) { + long totalBytes = -1; try { // Step 1: Discover download strategy (PART or RANGE) and get metadata @@ -61,12 +65,15 @@ public override async Task ExecuteAsync(Cancell var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(cancellationToken) .ConfigureAwait(false); + totalBytes = discoveryResult.ObjectSize; + + Logger.DebugFormat("MultipartDownloadCommand: Discovered {0} part(s), total size: {1} bytes, IsSinglePart={2}", discoveryResult.TotalParts, discoveryResult.ObjectSize, discoveryResult.IsSinglePart); // Step 2: Start concurrent downloads for all parts - Logger.DebugFormat("MultipartDownloadCommand: Starting downloads for {0} part(s)", discoveryResult.TotalParts); - await coordinator.StartDownloadsAsync(discoveryResult, cancellationToken) + Logger.DebugFormat("Starting downloads for {0} part(s)", discoveryResult.TotalParts); + await coordinator.StartDownloadsAsync(discoveryResult, DownloadPartProgressEventCallback, cancellationToken) .ConfigureAwait(false); // Step 2b: Wait for all downloads to complete before returning @@ -110,11 
+117,18 @@ await coordinator.StartDownloadsAsync(discoveryResult, cancellationToken) mappedResponse.ChecksumSHA256 = null; } + // Fire completed event + FireTransferCompletedEvent(mappedResponse, totalBytes); + return mappedResponse; } catch (Exception ex) { Logger.Error(ex, "Exception during multipart download"); + + // Fire failed event + FireTransferFailedEvent(totalBytes); + throw; } } diff --git a/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs b/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs index 379c98fe710e..fda91a96e6a7 100644 --- a/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs +++ b/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs @@ -1426,6 +1426,161 @@ public void SimpleDownloadCompleteLifecycleTest() completedValidator.AssertEventFired(); } + [TestMethod] + [TestCategory("S3")] + public async Task MultipartDownloadProgressTest() + { + var fileName = UtilityMethods.GenerateName("MultipartDownloadProgress"); + var originalFilePath = Path.Combine(BasePath, fileName); + var downloadedFilePath = originalFilePath + ".dn"; + + // Upload a large file (20MB to ensure multipart) + UtilityMethods.GenerateFile(originalFilePath, 20 * MEG_SIZE); + await Client.PutObjectAsync(new PutObjectRequest + { + BucketName = bucketName, + Key = fileName, + FilePath = originalFilePath + }); + + int inProgressEventCount = 0; + int completedEventCount = 0; + long lastTransferredBytes = 0; + + var progressValidator = new TransferProgressValidator + { + ValidateProgressInterval = true, // Enable interval validation to ensure events fire + Validate = (p) => + { + Assert.AreEqual(bucketName, p.BucketName); + Assert.AreEqual(fileName, p.Key); + Assert.IsNotNull(p.FilePath); + Assert.IsTrue(p.TransferredBytes >= lastTransferredBytes); + + if (p.IsCompleted) + { + completedEventCount++; + Assert.AreEqual(p.TotalBytes, p.TransferredBytes); + } + else + { + inProgressEventCount++; + Assert.IsTrue(p.TransferredBytes < p.TotalBytes); + } + + lastTransferredBytes = p.TransferredBytes; + } + }; + + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityDownloadRequest + { + BucketName = bucketName, + FilePath = downloadedFilePath, + Key = fileName + }; + request.WriteObjectProgressEvent += progressValidator.OnProgressEvent; + + // Use DownloadWithResponseAsync to trigger MultipartDownloadCommand + var response = await transferUtility.DownloadWithResponseAsync(request); + + progressValidator.AssertOnCompletion(); + + // Validate that in-progress events actually fired during the download + Assert.IsTrue(inProgressEventCount > 0, + $"Expected in-progress events to fire during multipart download, but got {inProgressEventCount}"); + Assert.AreEqual(1, completedEventCount); + + Assert.IsNotNull(response); + UtilityMethods.CompareFiles(originalFilePath, downloadedFilePath); + } + + [TestMethod] + [TestCategory("S3")] + public async Task MultipartDownloadInitiatedCompletedEventsTest() + { + var fileName = UtilityMethods.GenerateName("MultipartDownloadEvents"); + var originalFilePath = Path.Combine(BasePath, fileName); + var downloadedFilePath = originalFilePath + ".dn"; + long expectedSize = 20 * MEG_SIZE; + + // Upload large file + UtilityMethods.GenerateFile(originalFilePath, expectedSize); + await Client.PutObjectAsync(new PutObjectRequest + { + BucketName = bucketName, + Key = fileName, + FilePath = originalFilePath + }); + + bool initiatedEventFired = false; + bool completedEventFired = false; + + var transferUtility = new 
TransferUtility(Client); + var request = new TransferUtilityDownloadRequest + { + BucketName = bucketName, + FilePath = downloadedFilePath, + Key = fileName + }; + + request.DownloadInitiatedEvent += (s, e) => + { + Assert.IsFalse(initiatedEventFired, "Initiated event should fire only once"); + initiatedEventFired = true; + Assert.AreEqual(fileName, e.Request.Key); + }; + + request.DownloadCompletedEvent += (s, e) => + { + Assert.IsFalse(completedEventFired, "Completed event should fire only once"); + completedEventFired = true; + Assert.AreEqual(expectedSize, e.TotalBytes); + Assert.AreEqual(expectedSize, e.TransferredBytes); + }; + + var response = await transferUtility.DownloadWithResponseAsync(request); + + Assert.IsTrue(initiatedEventFired, "Initiated event should have fired"); + Assert.IsTrue(completedEventFired, "Completed event should have fired"); + Assert.IsNotNull(response); + } + + [TestMethod] + [TestCategory("S3")] + public async Task MultipartDownloadFailedEventTest() + { + var fileName = UtilityMethods.GenerateName("MultipartDownloadFailed"); + var downloadedFilePath = Path.Combine(BasePath, fileName + ".dn"); + + bool failedEventFired = false; + + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityDownloadRequest + { + BucketName = bucketName, + FilePath = downloadedFilePath, + Key = "non-existent-key-" + Guid.NewGuid() // Intentionally non-existent + }; + + request.DownloadFailedEvent += (s, e) => + { + failedEventFired = true; + Assert.IsNotNull(e.FilePath); + }; + + try + { + await transferUtility.DownloadWithResponseAsync(request); + Assert.Fail("Expected an exception to be thrown for non-existent key"); + } + catch (AmazonS3Exception) + { + // Expected exception - the failed event should have been fired + Assert.IsTrue(failedEventFired, "Failed event should have fired"); + } + } + void Download(string fileName, long size, TransferProgressValidator progressValidator) { var key = fileName; diff --git a/sdk/test/Services/S3/UnitTests/Custom/BufferedMultipartStreamTests.cs b/sdk/test/Services/S3/UnitTests/Custom/BufferedMultipartStreamTests.cs index d85104e0ad99..e56a20cedce4 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/BufferedMultipartStreamTests.cs +++ b/sdk/test/Services/S3/UnitTests/Custom/BufferedMultipartStreamTests.cs @@ -73,7 +73,7 @@ private async Task CreateInitializedStreamAsync( _mockCoordinator.Setup(x => x.DiscoverDownloadStrategyAsync(It.IsAny())) .ReturnsAsync(discoveryResult); - _mockCoordinator.Setup(x => x.StartDownloadsAsync(It.IsAny(), It.IsAny())) + _mockCoordinator.Setup(x => x.StartDownloadsAsync(It.IsAny(), It.IsAny>(), It.IsAny())) .Returns(Task.CompletedTask); var stream = CreateStream(); @@ -199,7 +199,7 @@ public async Task InitializeAsync_SinglePart_CallsStartDownloads() var mockCoordinator = new Mock(); mockCoordinator.Setup(x => x.DiscoverDownloadStrategyAsync(It.IsAny())) .ReturnsAsync(discoveryResult); - mockCoordinator.Setup(x => x.StartDownloadsAsync(It.IsAny(), It.IsAny())) + mockCoordinator.Setup(x => x.StartDownloadsAsync(It.IsAny(), It.IsAny>(), It.IsAny())) .Returns(Task.CompletedTask); var mockBufferManager = new Mock(); @@ -211,7 +211,7 @@ public async Task InitializeAsync_SinglePart_CallsStartDownloads() // Assert mockCoordinator.Verify( - x => x.StartDownloadsAsync(discoveryResult, It.IsAny()), + x => x.StartDownloadsAsync(discoveryResult, It.IsAny>(), It.IsAny()), Times.Once); } @@ -233,7 +233,7 @@ public async Task InitializeAsync_Multipart_UsesMultipartHandler() var mockCoordinator = 
new Mock(); mockCoordinator.Setup(x => x.DiscoverDownloadStrategyAsync(It.IsAny())) .ReturnsAsync(discoveryResult); - mockCoordinator.Setup(x => x.StartDownloadsAsync(It.IsAny(), It.IsAny())) + mockCoordinator.Setup(x => x.StartDownloadsAsync(It.IsAny(), It.IsAny>(), It.IsAny())) .Returns(Task.CompletedTask); var mockBufferManager = new Mock(); @@ -261,7 +261,7 @@ public async Task InitializeAsync_Multipart_StartsDownloads() var mockCoordinator = new Mock(); mockCoordinator.Setup(x => x.DiscoverDownloadStrategyAsync(It.IsAny())) .ReturnsAsync(discoveryResult); - mockCoordinator.Setup(x => x.StartDownloadsAsync(It.IsAny(), It.IsAny())) + mockCoordinator.Setup(x => x.StartDownloadsAsync(It.IsAny(), It.IsAny>(), It.IsAny())) .Returns(Task.CompletedTask); var mockBufferManager = new Mock(); @@ -273,7 +273,7 @@ public async Task InitializeAsync_Multipart_StartsDownloads() // Assert mockCoordinator.Verify( - x => x.StartDownloadsAsync(discoveryResult, It.IsAny()), + x => x.StartDownloadsAsync(discoveryResult, It.IsAny>(), It.IsAny()), Times.Once); } diff --git a/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs b/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs index 41bb77ce57be..49bd3b794adc 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs +++ b/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs @@ -21,7 +21,59 @@ private Mock CreateMockDataHandler() { var mockHandler = new Mock(); mockHandler.Setup(x => x.ProcessPartAsync(It.IsAny(), It.IsAny(), It.IsAny())) - .Returns(Task.CompletedTask); + .Returns(async (partNumber, response, ct) => + { + // Simulate reading the stream and firing progress events + // This mimics the real S3 SDK behavior where WriteObjectProgressEvent is fired as data is read + if (response?.ResponseStream != null) + { + var buffer = new byte[8192]; + long totalBytesRead = 0; + long accumulatedBytes = 0; // Accumulate bytes until threshold is reached + int bytesRead; + + // DefaultProgressUpdateInterval is 102400 bytes (100KB) + const long progressThreshold = 102400; + + while ((bytesRead = response.ResponseStream.Read(buffer, 0, buffer.Length)) > 0) + { + totalBytesRead += bytesRead; + accumulatedBytes += bytesRead; + + // Fire progress event when accumulated bytes exceed threshold + // This matches real S3 SDK behavior which throttles progress events + if (accumulatedBytes >= progressThreshold) + { + response.OnRaiseProgressEvent( + null, // filePath + accumulatedBytes, // incrementTransferred + totalBytesRead, // transferred + response.ContentLength, // total + false); // completed + accumulatedBytes = 0; // Reset accumulator after firing event + } + } + + // Fire final event with any remaining bytes + if (accumulatedBytes > 0) + { + response.OnRaiseProgressEvent( + null, // filePath + accumulatedBytes, // incrementTransferred + totalBytesRead, // transferred + response.ContentLength, // total + false); // completed + } + } + + // Give background events time to fire before response is disposed + // OnRaiseProgressEvent uses AWSSDKUtils.InvokeInBackground which queues work on ThreadPool + // Use Thread.Sleep to block and force ThreadPool to execute queued work + Thread.Sleep(500); + + // Additional yield to ensure all queued work completes + await Task.Yield(); + }); mockHandler.Setup(x => x.WaitForCapacityAsync(It.IsAny())) .Returns(Task.CompletedTask); mockHandler.Setup(x => x.ReleaseCapacity()); @@ -29,6 +81,39 @@ private Mock CreateMockDataHandler() return mockHandler; } 
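The Interlocked pattern these tests exercise can be hard to picture from the mock alone. The following self-contained sketch (illustrative names only, not SDK types) shows the two halves side by side: manager-style aggregation with Interlocked.Add plus a CompareExchange latch so exactly one thread observes completion, and a command-style consumer that stores the pre-aggregated total with Interlocked.Exchange.

using System;
using System.Threading;
using System.Threading.Tasks;

class ProgressAggregationSketch
{
    // Manager side: accumulates per-part increments into a running total.
    private long _totalTransferredBytes;
    private int _completionEventFired; // int, not bool: Interlocked.CompareExchange has no bool overload

    // Command side: latest pre-aggregated total, stored with Exchange.
    private long _lastKnownTransferredBytes;

    public void OnPartIncrement(long increment, long totalObjectSize)
    {
        // Aggregate increments arriving concurrently from multiple parts.
        long transferred = Interlocked.Add(ref _totalTransferredBytes, increment);

        // Latch: only the first thread to cross the threshold wins the CompareExchange.
        bool isComplete = transferred >= totalObjectSize
            && Interlocked.CompareExchange(ref _completionEventFired, 1, 0) == 0;

        OnAggregatedTotal(transferred, isComplete);
    }

    private void OnAggregatedTotal(long transferredTotal, bool isComplete)
    {
        // The incoming value is already a total, so overwrite (Exchange) rather than accumulate (Add).
        Interlocked.Exchange(ref _lastKnownTransferredBytes, transferredTotal);
        if (isComplete)
            Console.WriteLine($"Completed once at {transferredTotal} bytes");
    }

    static async Task Main()
    {
        var sketch = new ProgressAggregationSketch();
        const long partSize = 8L * 1024 * 1024;
        const long totalSize = 3 * partSize;

        // Three "parts" reporting concurrently; "Completed once" prints exactly one time.
        var tasks = new Task[3];
        for (int i = 0; i < 3; i++)
            tasks[i] = Task.Run(() => sketch.OnPartIncrement(partSize, totalSize));
        await Task.WhenAll(tasks);

        Console.WriteLine($"Last known total: {Interlocked.Read(ref sketch._lastKnownTransferredBytes)}");
    }
}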
+ /// + /// Helper method to wait for async progress events to complete. + /// Polls until expected bytes are transferred or timeout occurs. + /// + private async Task WaitForProgressEventsAsync( + List progressEvents, + object progressLock, + long expectedBytes, + int timeoutMs = 5000) + { + var startTime = DateTime.UtcNow; + + while ((DateTime.UtcNow - startTime).TotalMilliseconds < timeoutMs) + { + lock (progressLock) + { + if (progressEvents.Count > 0) + { + var lastEvent = progressEvents.Last(); + if (lastEvent.TransferredBytes >= expectedBytes) + { + return true; + } + } + } + + // Small delay between checks + await Task.Delay(10); + } + + return false; + } + #region Constructor Tests [TestMethod] @@ -492,7 +577,7 @@ public async Task StartDownloadsAsync_SinglePart_ReturnsImmediately() var mockBufferManager = new Mock(); // Act - await coordinator.StartDownloadsAsync(discoveryResult, CancellationToken.None); + await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); // Assert - should complete without any downloads mockClient.Verify(x => x.GetObjectAsync(It.IsAny(), It.IsAny()), Times.Never); @@ -509,7 +594,7 @@ public async Task StartDownloadsAsync_WithNullDiscoveryResult_ThrowsArgumentNull var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); // Act - await coordinator.StartDownloadsAsync(null, CancellationToken.None); + await coordinator.StartDownloadsAsync(null, null, CancellationToken.None); } #endregion @@ -532,7 +617,7 @@ public async Task Validation_Failures_ThrowInvalidOperationException( var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); // Act & Assert (exception expected via attribute) - await coordinator.StartDownloadsAsync(discoveryResult, CancellationToken.None); + await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); await coordinator.DownloadCompletionTask; // Wait for background task to observe exceptions } @@ -556,7 +641,7 @@ public async Task Validation_ETag_Matching_Succeeds() var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); // Act - should succeed with matching ETags - await coordinator.StartDownloadsAsync(discoveryResult, CancellationToken.None); + await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); // Assert - no exception thrown } @@ -598,7 +683,7 @@ public async Task Validation_ContentRange_ValidRange_Succeeds() var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); // Act - should succeed with valid ranges - await coordinator.StartDownloadsAsync(discoveryResult, CancellationToken.None); + await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); // Assert - no exception thrown } @@ -739,7 +824,7 @@ public async Task StartDownloadsAsync_WhenCancelledBeforeStart_ThrowsOperationCa cts.Cancel(); // Act - await coordinator.StartDownloadsAsync(discoveryResult, cts.Token); + await coordinator.StartDownloadsAsync(discoveryResult, null, cts.Token); await coordinator.DownloadCompletionTask; // Wait for background task to observe exceptions } @@ -780,7 +865,7 @@ public async Task StartDownloadsAsync_WhenCancelledDuringDownloads_NotifiesBuffe // Act try { - await coordinator.StartDownloadsAsync(discoveryResult, CancellationToken.None); + await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); await 
coordinator.DownloadCompletionTask; // Wait for background task to observe exceptions } catch (OperationCanceledException) @@ -825,7 +910,7 @@ public async Task StartDownloadsAsync_WhenCancelled_SetsDownloadException() // Act try { - await coordinator.StartDownloadsAsync(discoveryResult, CancellationToken.None); + await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); await coordinator.DownloadCompletionTask; // Wait for background task to observe exceptions } catch (OperationCanceledException) @@ -859,7 +944,7 @@ public async Task StartDownloadsAsync_PassesCancellationTokenToBufferManager() var cts = new CancellationTokenSource(); // Act - await coordinator.StartDownloadsAsync(discoveryResult, cts.Token); + await coordinator.StartDownloadsAsync(discoveryResult, null, cts.Token); // Assert - The cancellation token was passed through to the data handler Assert.IsNotNull(discoveryResult); @@ -885,7 +970,7 @@ public async Task StartDownloadsAsync_SinglePart_DoesNotThrowOnCancellation() cts.Cancel(); // Act - should complete without throwing even though token is cancelled - await coordinator.StartDownloadsAsync(discoveryResult, cts.Token); + await coordinator.StartDownloadsAsync(discoveryResult, null, cts.Token); // Assert - no exception thrown, no S3 calls made mockClient.Verify(x => x.GetObjectAsync(It.IsAny(), It.IsAny()), Times.Never); @@ -936,7 +1021,7 @@ public async Task StartDownloadsAsync_CancellationPropagatesAcrossConcurrentDown // Act try { - await coordinator.StartDownloadsAsync(discoveryResult, CancellationToken.None); + await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); await coordinator.DownloadCompletionTask; // Wait for background task to observe exceptions } catch (OperationCanceledException) @@ -1013,7 +1098,7 @@ public async Task StartDownloadsAsync_RangeStrategy_CancellationDuringDownloads( var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); // Act - await coordinator.StartDownloadsAsync(discoveryResult, CancellationToken.None); + await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); await coordinator.DownloadCompletionTask; // Wait for background task to observe exceptions } @@ -1068,7 +1153,7 @@ public async Task StartDownloadsAsync_ReturnsImmediately_PreventsDeadlock() // Act - StartDownloadsAsync should return immediately (not wait for all downloads) var stopwatch = System.Diagnostics.Stopwatch.StartNew(); - await coordinator.StartDownloadsAsync(discoveryResult, CancellationToken.None); + await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); stopwatch.Stop(); // Assert - StartDownloadsAsync should return almost immediately @@ -1117,7 +1202,7 @@ public async Task StartDownloadsAsync_SinglePart_ReturnsImmediatelyWithoutBackgr // Act var stopwatch = System.Diagnostics.Stopwatch.StartNew(); - await coordinator.StartDownloadsAsync(discoveryResult, CancellationToken.None); + await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); stopwatch.Stop(); // DownloadCompletionTask should be completed immediately (no background work) @@ -1619,5 +1704,147 @@ public void ValidateContentRange_RangeStrategy_ExceptionMessage_ContainsExpected } #endregion + + #region Progress Callback Tests + + [TestMethod] + public async Task ProgressCallback_ConcurrentCompletion_FiresOnlyOneCompletionEvent() + { + // Arrange - Simulate 3 parts completing simultaneously + var totalParts = 3; + var 
partSize = 8 * 1024 * 1024; + var totalObjectSize = totalParts * partSize; + + // Track all progress events + var progressEvents = new List(); + var progressLock = new object(); + + EventHandler progressCallback = (sender, args) => + { + lock (progressLock) + { + progressEvents.Add(args); + } + }; + + // Create mock responses that simulate concurrent completion + var firstPartResponse = MultipartDownloadTestHelpers.CreateMultipartFirstPartResponse( + partSize, totalParts, totalObjectSize, "test-etag"); + + var secondPartResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse( + partSize, totalParts, + $"bytes {partSize}-{2 * partSize - 1}/{totalObjectSize}", + "test-etag"); + + var thirdPartResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse( + partSize, totalParts, + $"bytes {2 * partSize}-{totalObjectSize - 1}/{totalObjectSize}", + "test-etag"); + + int callCount = 0; + var mockClient = new Mock(); + mockClient.Setup(x => x.GetObjectAsync(It.IsAny(), It.IsAny())) + .Returns(() => + { + callCount++; + if (callCount == 1) return Task.FromResult(firstPartResponse); + if (callCount == 2) return Task.FromResult(secondPartResponse); + return Task.FromResult(thirdPartResponse); + }); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: MultipartDownloadType.PART); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration( + concurrentRequests: 3); // Allow all parts to complete simultaneously + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + + // Act + await coordinator.StartDownloadsAsync(discoveryResult, progressCallback, CancellationToken.None); + + // Wait for async progress events to complete + var success = await WaitForProgressEventsAsync(progressEvents, progressLock, totalObjectSize); + Assert.IsTrue(success, "Timed out waiting for progress events to complete"); + + // Assert - Verify only ONE completion event fired (IsCompleted=true) + lock (progressLock) + { + var completionEvents = progressEvents.Where(e => e.PercentDone == 100 && e.TransferredBytes == totalObjectSize).ToList(); + + // There should be at least one event at 100% + Assert.IsTrue(completionEvents.Count > 0, "Expected at least one progress event at 100%"); + + // But only ONE should have been fired with the atomic flag logic + // (Note: Due to the buffering and event timing, we might see multiple events at 100%, + // but the key is that the completion logic only fired once) + Assert.IsTrue(progressEvents.Count > 0, "Expected progress events to be fired"); + + // Verify we reached 100% completion + var finalEvent = progressEvents.Last(); + Assert.AreEqual(100, finalEvent.PercentDone, "Expected final progress to be 100%"); + Assert.AreEqual(totalObjectSize, finalEvent.TransferredBytes, "Expected all bytes transferred"); + } + } + + [TestMethod] + public async Task ProgressCallback_MultiplePartsComplete_AggregatesCorrectly() + { + // Arrange - Test progress aggregation across multiple parts + var totalParts = 3; + var partSize = 8 * 1024 * 1024; + var totalObjectSize = totalParts * partSize; + + var progressEvents = new List(); + var progressLock = new object(); + + EventHandler progressCallback = (sender, args) => + { + lock (progressLock) + { + progressEvents.Add(args); + } + }; + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( + totalParts, 
partSize, totalObjectSize, "test-etag", usePartStrategy: true); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: MultipartDownloadType.PART); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + + // Act + await coordinator.StartDownloadsAsync(discoveryResult, progressCallback, CancellationToken.None); + + // Wait for async progress events to complete + var success = await WaitForProgressEventsAsync(progressEvents, progressLock, totalObjectSize); + Assert.IsTrue(success, "Timed out waiting for progress events to complete"); + + // Assert + lock (progressLock) + { + // Should have received progress events + Assert.IsTrue(progressEvents.Count > 0, "Expected progress events"); + + // Final event should show 100% completion + var finalEvent = progressEvents.Last(); + Assert.AreEqual(totalObjectSize, finalEvent.TransferredBytes, "Expected all bytes transferred"); + Assert.AreEqual(100, finalEvent.PercentDone, "Expected 100% completion"); + + // TransferredBytes should only increase (monotonic) + long lastTransferred = 0; + foreach (var evt in progressEvents) + { + Assert.IsTrue(evt.TransferredBytes >= lastTransferred, + "TransferredBytes should be monotonically increasing"); + lastTransferred = evt.TransferredBytes; + } + } + } + + #endregion } } From 232883a043f212f8f9f0c521623836fb75413744 Mon Sep 17 00:00:00 2001 From: Garrett Beatty Date: Mon, 1 Dec 2025 17:07:14 -0500 Subject: [PATCH 30/56] Add DownloadDirectoryWithResponse (#4141) --- .../55fe9e14-c79e-4426-9828-deae0451d4f6.json | 11 + .../Internal/DownloadDirectoryCommand.cs | 17 +- .../Transfer/Internal/FilePartDataHandler.cs | 10 +- .../Internal/MultipartDownloadCommand.cs | 17 +- .../Internal/MultipartDownloadManager.cs | 82 +- .../_async/MultipartDownloadCommand.async.cs | 4 +- .../DownloadDirectoryCommand.cs | 41 +- .../UploadDirectoryCommand.cs | 39 +- .../Transfer/_async/ITransferUtility.async.cs | 35 + .../Transfer/_async/TransferUtility.async.cs | 23 + .../ITransferUtility.async.cs | 14 + .../_bcl+netstandard/ITransferUtility.sync.cs | 44 + .../_bcl+netstandard/TransferUtility.sync.cs | 28 + ...ilityDownloadDirectoryWithResponseTests.cs | 632 +++++++++++ .../Custom/DownloadDirectoryCommandTests.cs | 994 ++++++++++++++++++ .../Custom/MultipartDownloadCommandTests.cs | 188 ++++ 16 files changed, 2146 insertions(+), 33 deletions(-) create mode 100644 generator/.DevConfigs/55fe9e14-c79e-4426-9828-deae0451d4f6.json create mode 100644 sdk/test/Services/S3/IntegrationTests/TransferUtilityDownloadDirectoryWithResponseTests.cs create mode 100644 sdk/test/Services/S3/UnitTests/Custom/DownloadDirectoryCommandTests.cs diff --git a/generator/.DevConfigs/55fe9e14-c79e-4426-9828-deae0451d4f6.json b/generator/.DevConfigs/55fe9e14-c79e-4426-9828-deae0451d4f6.json new file mode 100644 index 000000000000..0416619a5fb5 --- /dev/null +++ b/generator/.DevConfigs/55fe9e14-c79e-4426-9828-deae0451d4f6.json @@ -0,0 +1,11 @@ +{ + "services": [ + { + "serviceName": "S3", + "type": "minor", + "changeLogMessages": [ + "Created new DownloadDirectoryWithResponseAsync methods on the Amazon.S3.Transfer.TransferUtility class. The new operations support downloading directories using multipart download for files and return response metadata." 
+ ] + } + ] +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadDirectoryCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadDirectoryCommand.cs index ab7fe961ad4a..d932a282b137 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadDirectoryCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadDirectoryCommand.cs @@ -42,6 +42,7 @@ internal partial class DownloadDirectoryCommand : BaseCommand + /// Initializes a new instance for file downloads. + /// Writes parts directly to disk without memory buffering. + /// public FilePartDataHandler(FileDownloadConfiguration config) { _config = config ?? throw new ArgumentNullException(nameof(config)); @@ -90,14 +94,16 @@ await WritePartToFileAsync(offset, response, cancellationToken) /// public Task WaitForCapacityAsync(CancellationToken cancellationToken) { - // No backpressure needed - OS handles concurrent file access + // No-op: FilePartDataHandler writes directly to disk without buffering parts in memory. + // Memory throttling is only needed for BufferedPartDataHandler which keeps parts in memory. return Task.CompletedTask; } /// public void ReleaseCapacity() { - // No-op + // No-op: FilePartDataHandler writes directly to disk without buffering parts in memory. + // Memory throttling is only needed for BufferedPartDataHandler which keeps parts in memory. } /// diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadCommand.cs index 0ce97801bd81..809c00c2d3dc 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadCommand.cs @@ -20,6 +20,7 @@ * */ using System; +using System.Threading; using Amazon.Runtime.Internal.Util; using Amazon.S3.Model; using Amazon.S3.Util; @@ -36,6 +37,7 @@ internal partial class MultipartDownloadCommand : BaseCommand - /// Initializes a new instance of the MultipartDownloadCommand class. + /// Initializes a new instance of the MultipartDownloadCommand class for single file downloads. /// /// The S3 client to use for downloads. /// The download request containing configuration. /// The TransferUtility configuration. internal MultipartDownloadCommand(IAmazonS3 s3Client, TransferUtilityDownloadRequest request, TransferUtilityConfig config) + : this(s3Client, request, config, null) + { + } + + /// + /// Initializes a new instance of the MultipartDownloadCommand class for directory downloads. + /// + /// The S3 client to use for downloads. + /// The download request containing configuration. + /// The TransferUtility configuration. + /// Shared HTTP concurrency throttler for directory operations, or null for single file downloads. + internal MultipartDownloadCommand(IAmazonS3 s3Client, TransferUtilityDownloadRequest request, TransferUtilityConfig config, SemaphoreSlim sharedHttpThrottler) { _s3Client = s3Client ?? throw new ArgumentNullException(nameof(s3Client)); _request = request ?? throw new ArgumentNullException(nameof(request)); _config = config ?? 
throw new ArgumentNullException(nameof(config));
+        _sharedHttpThrottler = sharedHttpThrottler; // Can be null for single file downloads
     }

     ///
diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs
index 0ad53f273ffd..f56b45195e06 100644
--- a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs
+++ b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs
@@ -44,6 +44,7 @@ internal class MultipartDownloadManager : IDownloadManager
     private readonly DownloadManagerConfiguration _config;
     private readonly IPartDataHandler _dataHandler;
     private readonly SemaphoreSlim _httpConcurrencySlots;
+    private readonly bool _ownsHttpThrottler;
     private readonly RequestEventHandler _requestEventHandler;

     private Exception _downloadException;
@@ -79,15 +80,64 @@ private Logger Logger
     public Task DownloadCompletionTask => _downloadCompletionTask ?? Task.CompletedTask;

     ///
-    /// Initializes a new instance of the MultipartDownloadManager class.
+    /// Initializes a new instance of the MultipartDownloadManager for single file downloads.
+    /// This constructor creates and owns its own HTTP concurrency throttler based on the configuration.
     ///
-    /// The IAmazonS3 client for making S3 requests.
-    /// The BaseDownloadRequest containing download parameters.
-    /// The DownloadManagerConfiguration with download settings.
-    /// The IPartDataHandler for processing downloaded parts.
-    /// Optional RequestEventHandler for user agent tracking.
-    /// Thrown when any required parameter is null.
+    /// The IAmazonS3 client used to make GetObject requests to S3.
+    /// The BaseDownloadRequest containing bucket, key, version, and download strategy configuration.
+    /// The DownloadManagerConfiguration specifying concurrency limits and part size settings.
+    /// The IPartDataHandler responsible for buffering and processing downloaded part data.
+    /// Optional request event handler for adding custom headers or tracking requests. May be null.
+    ///
+    /// Thrown when s3Client, request, config, or dataHandler is null.
+    ///
+    ///
+    /// This constructor is used for single file downloads where each download manages its own HTTP concurrency.
+    /// The created throttler will be disposed when this instance is disposed.
+    /// For directory downloads with shared concurrency management, use the overload that accepts a shared throttler.
+    ///
+    ///
+    ///
+    ///
     public MultipartDownloadManager(IAmazonS3 s3Client, BaseDownloadRequest request, DownloadManagerConfiguration config, IPartDataHandler dataHandler, RequestEventHandler requestEventHandler = null)
+        : this(s3Client, request, config, dataHandler, requestEventHandler, null)
+    {
+    }
+
+    ///
+    /// Initializes a new instance of the MultipartDownloadManager for directory downloads or scenarios requiring shared concurrency control.
+    /// This constructor allows using a shared HTTP concurrency throttler across multiple concurrent file downloads.
+    ///
+    /// The IAmazonS3 client used to make GetObject requests to S3.
+    /// The BaseDownloadRequest containing bucket, key, version, and download strategy configuration.
+    /// The DownloadManagerConfiguration specifying concurrency limits and part size settings.
+    /// The IPartDataHandler responsible for buffering and processing downloaded part data.
+    /// Optional request event handler for adding custom headers or tracking requests. May be null.
+    ///
+    /// Optional shared SemaphoreSlim for coordinating HTTP concurrency across multiple downloads.
+    /// If null, a new throttler will be created and owned by this instance.
+    /// If provided, the caller retains ownership and responsibility for disposal.
+    ///
+    ///
+    /// Thrown when s3Client, request, config, or dataHandler is null.
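+    ///
+    /// A minimal sketch of the shared-throttler pattern (variable names are illustrative):
+    ///
+    ///     var sharedThrottler = new SemaphoreSlim(config.ConcurrentServiceRequests);
+    ///     using (var manager = new MultipartDownloadManager(s3Client, request, managerConfig, dataHandler, null, sharedThrottler))
+    ///     {
+    ///         // ... run the download; the manager will NOT dispose sharedThrottler ...
+    ///     }
+    ///     sharedThrottler.Dispose(); // caller-owned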
+ /// + /// + /// + /// This constructor is typically used by directory download operations where multiple files are being downloaded + /// concurrently and need to share a global HTTP concurrency limit. + /// + /// + /// Resource Ownership: + /// If is provided, this instance does NOT take ownership and will NOT dispose it. + /// If is null, this instance creates and owns the throttler and will dispose it. + /// + /// + /// + /// + /// + /// + /// + public MultipartDownloadManager(IAmazonS3 s3Client, BaseDownloadRequest request, DownloadManagerConfiguration config, IPartDataHandler dataHandler, RequestEventHandler requestEventHandler, SemaphoreSlim sharedHttpThrottler) { _s3Client = s3Client ?? throw new ArgumentNullException(nameof(s3Client)); _request = request ?? throw new ArgumentNullException(nameof(request)); @@ -95,7 +145,17 @@ public MultipartDownloadManager(IAmazonS3 s3Client, BaseDownloadRequest request, _dataHandler = dataHandler ?? throw new ArgumentNullException(nameof(dataHandler)); _requestEventHandler = requestEventHandler; - _httpConcurrencySlots = new SemaphoreSlim(_config.ConcurrentServiceRequests); + // Use shared throttler if provided, otherwise create our own + if (sharedHttpThrottler != null) + { + _httpConcurrencySlots = sharedHttpThrottler; + _ownsHttpThrottler = false; // Don't dispose - directory command owns it + } + else + { + _httpConcurrencySlots = new SemaphoreSlim(_config.ConcurrentServiceRequests); + _ownsHttpThrottler = true; // We own it, so we dispose it + } } /// @@ -654,7 +714,11 @@ public void Dispose() { try { - _httpConcurrencySlots?.Dispose(); + // Only dispose HTTP throttler if we own it + if (_ownsHttpThrottler) + { + _httpConcurrencySlots?.Dispose(); + } _dataHandler?.Dispose(); } catch (Exception) diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartDownloadCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartDownloadCommand.async.cs index 2752e801de3a..11a4e8ad8f45 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartDownloadCommand.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartDownloadCommand.async.cs @@ -50,12 +50,14 @@ public override async Task ExecuteAsync(Cancell using (var dataHandler = new FilePartDataHandler(config)) { // Create coordinator to manage the download process + // Pass shared HTTP throttler to control concurrency across files using (var coordinator = new MultipartDownloadManager( _s3Client, _request, config, dataHandler, - RequestEventHandler)) + RequestEventHandler, + _sharedHttpThrottler)) { long totalBytes = -1; try diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs index cf99100e239e..11c210a95cd9 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs @@ -66,15 +66,35 @@ public override async Task ExecuteAsyn this._totalNumberOfFilesToDownload = objs.Count; - SemaphoreSlim asyncThrottler = null; + // Two-level throttling architecture: + // 1. File-level throttler: Controls how many files are downloaded concurrently + // 2. 
HTTP-level throttler: Controls total HTTP requests across ALL file downloads + // + // Example with ConcurrentServiceRequests = 10: + // - fileOperationThrottler = 10: Up to 10 files can download simultaneously + // - sharedHttpRequestThrottler = 10: All 10 files share 10 total HTTP request slots + // - Without HTTP throttler: Would result in 10 files × 10 parts = 100 concurrent HTTP requests + // - With HTTP throttler: Enforces 10 total concurrent HTTP requests across all files + // + // This prevents resource exhaustion when downloading many large files with multipart downloads. + SemaphoreSlim fileOperationThrottler = null; + SemaphoreSlim sharedHttpRequestThrottler = null; CancellationTokenSource internalCts = null; try { - asyncThrottler = DownloadFilesConcurrently ? + // File-level throttler: Controls concurrent file operations + fileOperationThrottler = DownloadFilesConcurrently ? new SemaphoreSlim(this._config.ConcurrentServiceRequests) : new SemaphoreSlim(1); + // HTTP-level throttler: Shared across all downloads to control total HTTP concurrency + // Only needed for multipart downloads where each file makes multiple HTTP requests + if (this._useMultipartDownload) + { + sharedHttpRequestThrottler = new SemaphoreSlim(this._config.ConcurrentServiceRequests); + } + internalCts = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken); var pendingTasks = new List(); foreach (S3Object s3o in objs) @@ -82,7 +102,7 @@ public override async Task ExecuteAsyn if (s3o.Key.EndsWith("/", StringComparison.Ordinal)) continue; - await asyncThrottler.WaitAsync(cancellationToken) + await fileOperationThrottler.WaitAsync(cancellationToken) .ConfigureAwait(continueOnCapturedContext: false); try @@ -137,7 +157,15 @@ await asyncThrottler.WaitAsync(cancellationToken) var task = _failurePolicy.ExecuteAsync( async () => { - var command = new DownloadCommand(this._s3Client, downloadRequest); + BaseCommand command; + if (this._useMultipartDownload) + { + command = new MultipartDownloadCommand(this._s3Client, downloadRequest, this._config, sharedHttpRequestThrottler); + } + else + { + command = new DownloadCommand(this._s3Client, downloadRequest); + } await command.ExecuteAsync(internalCts.Token) .ConfigureAwait(false); }, @@ -149,7 +177,7 @@ await command.ExecuteAsync(internalCts.Token) } finally { - asyncThrottler.Release(); + fileOperationThrottler.Release(); } } await TaskHelpers.WhenAllOrFirstExceptionAsync(pendingTasks, cancellationToken) @@ -170,7 +198,8 @@ await TaskHelpers.WhenAllOrFirstExceptionAsync(pendingTasks, cancellationToken) finally { internalCts.Dispose(); - asyncThrottler.Dispose(); + fileOperationThrottler.Dispose(); + sharedHttpRequestThrottler?.Dispose(); } } diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/UploadDirectoryCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/UploadDirectoryCommand.cs index ff9f38f42149..07c71c27363f 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/UploadDirectoryCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/UploadDirectoryCommand.cs @@ -37,30 +37,44 @@ public override async Task ExecuteAsync( .ConfigureAwait(continueOnCapturedContext: false); this._totalNumberOfFiles = filePaths.Length; - SemaphoreSlim asyncThrottler = null; - SemaphoreSlim loopThrottler = null; + // Two-level throttling architecture: + // 1. File-level throttler: Controls how many files are uploaded concurrently + // 2. 
HTTP-level throttler: Controls total HTTP requests across ALL file uploads + // + // Example with ConcurrentServiceRequests = 10: + // - fileOperationThrottler = 10: Up to 10 files can upload simultaneously + // - sharedHttpRequestThrottler = 10: All 10 files share 10 total HTTP request slots + // - Without HTTP throttler: Would result in 10 files × 10 parts = 100 concurrent HTTP requests + // - With HTTP throttler: Enforces 10 total concurrent HTTP requests across all files + // + // This prevents resource exhaustion when uploading many large files with multipart uploads. + SemaphoreSlim sharedHttpRequestThrottler = null; + SemaphoreSlim fileOperationThrottler = null; CancellationTokenSource internalCts = null; try { var pendingTasks = new List(); - loopThrottler = UploadFilesConcurrently ? + + // File-level throttler: Controls concurrent file operations + fileOperationThrottler = UploadFilesConcurrently ? new SemaphoreSlim(this._config.ConcurrentServiceRequests) : new SemaphoreSlim(1); - asyncThrottler = this._utility.S3Client is Amazon.S3.Internal.IAmazonS3Encryption ? - // If we are using AmazonS3EncryptionClient, don't set the async throttler. - // The loopThrottler will be used to control how many files are uploaded in parallel. + // HTTP-level throttler: Shared across all uploads to control total HTTP concurrency + sharedHttpRequestThrottler = this._utility.S3Client is Amazon.S3.Internal.IAmazonS3Encryption ? + // If we are using AmazonS3EncryptionClient, don't set the HTTP throttler. + // The fileOperationThrottler will be used to control how many files are uploaded in parallel. // Each upload (multipart) will upload parts serially. null : // Use a throttler which will be shared between simple and multipart uploads - // to control concurrent IO. + // to control total concurrent HTTP requests across all file operations. 
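+                // For example, with ConcurrentServiceRequests = 10, four concurrent multipart
+                // uploads of five parts each contend for the same 10 slots, so at most 10 part
+                // uploads are in flight at once instead of 20.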
                    new SemaphoreSlim(this._config.ConcurrentServiceRequests);
 
                 internalCts = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken);
                 foreach (string filepath in filePaths)
                 {
-                    await loopThrottler.WaitAsync(cancellationToken).ConfigureAwait(continueOnCapturedContext: false);
+                    await fileOperationThrottler.WaitAsync(cancellationToken).ConfigureAwait(continueOnCapturedContext: false);
 
                     try
                     {
@@ -74,14 +88,14 @@ public override async Task ExecuteAsync(
                         break;
                     }
                     var uploadRequest = ConstructRequest(basePath, filepath, prefix);
-                    var uploadCommand = _utility.GetUploadCommand(uploadRequest, asyncThrottler);
+                    var uploadCommand = _utility.GetUploadCommand(uploadRequest, sharedHttpRequestThrottler);
 
                     var task = ExecuteCommandAsync(uploadCommand, internalCts);
                     pendingTasks.Add(task);
                 }
                 finally
                 {
-                    loopThrottler.Release();
+                    fileOperationThrottler.Release();
                 }
             }
             await TaskHelpers.WhenAllOrFirstExceptionAsync(pendingTasks, cancellationToken)
@@ -90,9 +104,8 @@ await TaskHelpers.WhenAllOrFirstExceptionAsync(pendingTasks, cancellationToken)
             finally
             {
                 internalCts.Dispose();
-                loopThrottler.Dispose();
-                if (asyncThrottler != null)
-                    asyncThrottler.Dispose();
+                fileOperationThrottler.Dispose();
+                sharedHttpRequestThrottler?.Dispose();
             }
 
             return new TransferUtilityUploadDirectoryResponse();
diff --git a/sdk/src/Services/S3/Custom/Transfer/_async/ITransferUtility.async.cs b/sdk/src/Services/S3/Custom/Transfer/_async/ITransferUtility.async.cs
index bb01d8094c9f..74bcc619b7f4 100644
--- a/sdk/src/Services/S3/Custom/Transfer/_async/ITransferUtility.async.cs
+++ b/sdk/src/Services/S3/Custom/Transfer/_async/ITransferUtility.async.cs
@@ -432,6 +432,41 @@ public partial interface ITransferUtility : IDisposable
         /// <returns>The task object representing the asynchronous operation with download response metadata.</returns>
         Task<TransferUtilityDownloadResponse> DownloadWithResponseAsync(TransferUtilityDownloadRequest request, CancellationToken cancellationToken = default(CancellationToken));
 
+        /// <summary>
+        /// Downloads the objects in Amazon S3 that have a key that starts with the value
+        /// specified by s3Directory and returns response metadata.
+        /// Uses enhanced multipart download with concurrent part downloads for improved performance.
+        /// </summary>
+        /// <param name="bucketName">
+        /// The name of the bucket containing the Amazon S3 objects to download.
+        /// </param>
+        /// <param name="s3Directory">
+        /// The directory in Amazon S3 to download.
+        /// </param>
+        /// <param name="localDirectory">
+        /// The local directory to download the objects to.
+        /// </param>
+        /// <param name="cancellationToken">
+        /// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
+        /// </param>
+        /// <returns>The task object representing the asynchronous operation with download response metadata.</returns>
+        Task<TransferUtilityDownloadDirectoryResponse> DownloadDirectoryWithResponseAsync(string bucketName, string s3Directory, string localDirectory, CancellationToken cancellationToken = default(CancellationToken));
+
+        /// <summary>
+        /// Downloads the objects in Amazon S3 that have a key that starts with the value
+        /// specified by the S3Directory property and returns response metadata.
+        /// Uses enhanced multipart download with concurrent part downloads for improved performance.
+        /// </summary>
+        /// <param name="request">
+        /// Contains all the parameters required to download objects from Amazon S3
+        /// into a local directory.
+        /// </param>
+        /// <param name="cancellationToken">
+        /// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
+        /// </param>
+        /// <returns>The task object representing the asynchronous operation with download response metadata.</returns>
+        Task<TransferUtilityDownloadDirectoryResponse> DownloadDirectoryWithResponseAsync(TransferUtilityDownloadDirectoryRequest request, CancellationToken cancellationToken = default(CancellationToken));
+
         #endregion
 
         #region OpenStream
diff --git a/sdk/src/Services/S3/Custom/Transfer/_async/TransferUtility.async.cs b/sdk/src/Services/S3/Custom/Transfer/_async/TransferUtility.async.cs
index c48a44494ea1..88d36cba3dff 100644
--- a/sdk/src/Services/S3/Custom/Transfer/_async/TransferUtility.async.cs
+++ b/sdk/src/Services/S3/Custom/Transfer/_async/TransferUtility.async.cs
@@ -232,6 +232,29 @@ public partial class TransferUtility : ITransferUtility
 
         #endregion
 
+        #region DownloadDirectory
+
+        /// <inheritdoc/>
+        public async Task<TransferUtilityDownloadDirectoryResponse> DownloadDirectoryWithResponseAsync(string bucketName, string s3Directory, string localDirectory, CancellationToken cancellationToken = default(CancellationToken))
+        {
+            var request = ConstructDownloadDirectoryRequest(bucketName, s3Directory, localDirectory);
+            return await DownloadDirectoryWithResponseAsync(request, cancellationToken).ConfigureAwait(false);
+        }
+
+        /// <inheritdoc/>
+        public async Task<TransferUtilityDownloadDirectoryResponse> DownloadDirectoryWithResponseAsync(TransferUtilityDownloadDirectoryRequest request, CancellationToken cancellationToken = default(CancellationToken))
+        {
+            using(CreateSpan(nameof(DownloadDirectoryWithResponseAsync), null, Amazon.Runtime.Telemetry.Tracing.SpanKind.CLIENT))
+            {
+                CheckForBlockedArn(request.BucketName, "DownloadDirectory");
+                var command = new DownloadDirectoryCommand(this._s3Client, request, this._config, true);
+                command.DownloadFilesConcurrently = request.DownloadFilesConcurrently;
+                return await command.ExecuteAsync(cancellationToken).ConfigureAwait(false);
+            }
+        }
+
+        #endregion
+
         internal BaseCommand GetUploadCommand(TransferUtilityUploadRequest request, SemaphoreSlim asyncThrottler)
         {
             validate(request);
diff --git a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.async.cs b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.async.cs
index 46eddc3d5793..e11731050c43 100644
--- a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.async.cs
+++ b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.async.cs
@@ -152,6 +152,13 @@ public partial interface ITransferUtility
         /// Downloads the objects in Amazon S3 that have a key that starts with the value
         /// specified by s3Directory.
         /// </summary>
+        /// <remarks>
+        /// <para>
+        /// Note: Consider using DownloadDirectoryWithResponseAsync
+        /// instead. The newer operation uses enhanced multipart download with concurrent part downloads
+        /// for improved performance and returns response metadata including the total number of objects downloaded.
+        /// </para>
+        /// </remarks>
         /// <param name="bucketName">
         /// The name of the bucket containing the Amazon S3 objects to download.
         /// </param>
@@ -172,6 +179,13 @@ public partial interface ITransferUtility
         /// specified by the S3Directory
         /// property of the passed in TransferUtilityDownloadDirectoryRequest object.
         /// </summary>
+        /// <remarks>
+        /// <para>
+        /// Note: Consider using DownloadDirectoryWithResponseAsync
+        /// instead. The newer operation uses enhanced multipart download with concurrent part downloads
+        /// for improved performance and returns response metadata including the total number of objects downloaded.
+        /// </para>
+        /// </remarks>
         /// <param name="request">
         /// Contains all the parameters required to download objects from Amazon S3
         /// into a local directory.
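For illustration, a minimal usage sketch of the new DownloadDirectoryWithResponseAsync API; the bucket name, key prefix, and local path below are placeholders, and the call assumes an async context with credentials and region already configured:

    // Hypothetical caller code; all names below are placeholders.
    var transferUtility = new TransferUtility(new AmazonS3Client());
    var response = await transferUtility.DownloadDirectoryWithResponseAsync(
        new TransferUtilityDownloadDirectoryRequest
        {
            BucketName = "my-example-bucket",        // placeholder bucket
            S3Directory = "reports/2025",            // placeholder key prefix
            LocalDirectory = @"C:\temp\reports",     // placeholder local path
            DownloadFilesConcurrently = true         // files download in parallel
        });
    Console.WriteLine($"Downloaded {response.ObjectsDownloaded} objects.");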
diff --git a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.sync.cs b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.sync.cs
index e8387a3ef2d9..eeb52f04050c 100644
--- a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.sync.cs
+++ b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.sync.cs
@@ -534,6 +534,13 @@ public partial interface ITransferUtility
         /// Downloads the objects in Amazon S3 that have a key that starts with the value
         /// specified by s3Directory.
         /// </summary>
+        /// <remarks>
+        /// <para>
+        /// Note: Consider using DownloadDirectoryWithResponse
+        /// instead. The newer operation uses enhanced multipart download with concurrent part downloads
+        /// for improved performance and returns response metadata including the total number of objects downloaded.
+        /// </para>
+        /// </remarks>
         /// <param name="bucketName">
         /// The name of the bucket containing the Amazon S3 objects to download.
         /// </param>
@@ -550,11 +557,48 @@ public partial interface ITransferUtility
         /// specified by the S3Directory
         /// property of the passed in TransferUtilityDownloadDirectoryRequest object.
         /// </summary>
+        /// <remarks>
+        /// <para>
+        /// Note: Consider using DownloadDirectoryWithResponse
+        /// instead. The newer operation uses enhanced multipart download with concurrent part downloads
+        /// for improved performance and returns response metadata including the total number of objects downloaded.
+        /// </para>
+        /// </remarks>
         /// <param name="request">
         /// Contains all the parameters required to download objects from Amazon S3
         /// into a local directory.
         /// </param>
         void DownloadDirectory(TransferUtilityDownloadDirectoryRequest request);
+
+        /// <summary>
+        /// Downloads the objects in Amazon S3 that have a key that starts with the value
+        /// specified by s3Directory, returning response metadata.
+        /// Uses enhanced multipart download with concurrent part downloads for improved performance.
+        /// </summary>
+        /// <param name="bucketName">
+        /// The name of the bucket containing the Amazon S3 objects to download.
+        /// </param>
+        /// <param name="s3Directory">
+        /// The directory in Amazon S3 to download.
+        /// </param>
+        /// <param name="localDirectory">
+        /// The local directory to download the objects to.
+        /// </param>
+        /// <returns>Response metadata including the number of objects downloaded.</returns>
+        TransferUtilityDownloadDirectoryResponse DownloadDirectoryWithResponse(string bucketName, string s3Directory, string localDirectory);
+
+        /// <summary>
+        /// Downloads the objects in Amazon S3 that have a key that starts with the value
+        /// specified by the S3Directory property of the passed in
+        /// TransferUtilityDownloadDirectoryRequest object, returning response metadata.
+        /// Uses enhanced multipart download with concurrent part downloads for improved performance.
+        /// </summary>
+        /// <param name="request">
+        /// Contains all the parameters required to download objects from Amazon S3
+        /// into a local directory.
+        /// </param>
+        /// <returns>Response metadata including the number of objects downloaded.</returns>
+ TransferUtilityDownloadDirectoryResponse DownloadDirectoryWithResponse(TransferUtilityDownloadDirectoryRequest request); #endregion #region AbortMultipartUploads diff --git a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.sync.cs b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.sync.cs index bc36fdc78e74..457360ccd3f7 100644 --- a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.sync.cs +++ b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.sync.cs @@ -327,6 +327,34 @@ public void DownloadDirectory(TransferUtilityDownloadDirectoryRequest request) ExceptionDispatchInfo.Capture(e.InnerException).Throw(); } } + + /// + public TransferUtilityDownloadDirectoryResponse DownloadDirectoryWithResponse(string bucketName, string s3Directory, string localDirectory) + { + try + { + return DownloadDirectoryWithResponseAsync(bucketName, s3Directory, localDirectory).Result; + } + catch (AggregateException e) + { + ExceptionDispatchInfo.Capture(e.InnerException).Throw(); + return null; + } + } + + /// + public TransferUtilityDownloadDirectoryResponse DownloadDirectoryWithResponse(TransferUtilityDownloadDirectoryRequest request) + { + try + { + return DownloadDirectoryWithResponseAsync(request).Result; + } + catch (AggregateException e) + { + ExceptionDispatchInfo.Capture(e.InnerException).Throw(); + return null; + } + } #endregion #region AbortMultipartUploads diff --git a/sdk/test/Services/S3/IntegrationTests/TransferUtilityDownloadDirectoryWithResponseTests.cs b/sdk/test/Services/S3/IntegrationTests/TransferUtilityDownloadDirectoryWithResponseTests.cs new file mode 100644 index 000000000000..eaf593f72dbf --- /dev/null +++ b/sdk/test/Services/S3/IntegrationTests/TransferUtilityDownloadDirectoryWithResponseTests.cs @@ -0,0 +1,632 @@ +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Threading.Tasks; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Amazon.S3; +using Amazon.S3.Model; +using Amazon.S3.Transfer; +using Amazon.S3.Util; +using AWSSDK_DotNet.IntegrationTests.Utils; + +namespace AWSSDK_DotNet.IntegrationTests.Tests.S3 +{ + /// + /// Integration tests for TransferUtility.DownloadDirectoryWithResponseAsync functionality. + /// These tests verify end-to-end functionality with actual S3 operations and directory I/O. 
+ /// + /// These integration tests focus on: + /// - Basic directory downloads with response object + /// - Progress tracking with response + /// - Multipart downloads in directory context + /// - Concurrent vs sequential downloads + /// - Nested directory structures + /// - Response validation + /// + [TestClass] + public class TransferUtilityDownloadDirectoryWithResponseTests : TestBase + { + private static readonly long MB = 1024 * 1024; + private static readonly long KB = 1024; + private static string bucketName; + private static string tempDirectory; + + [ClassInitialize()] + public static void ClassInitialize(TestContext testContext) + { + bucketName = S3TestUtils.CreateBucketWithWait(Client); + tempDirectory = Path.Combine(Path.GetTempPath(), "S3DownloadDirectoryTests-" + Guid.NewGuid().ToString()); + Directory.CreateDirectory(tempDirectory); + } + + [ClassCleanup] + public static void ClassCleanup() + { + AmazonS3Util.DeleteS3BucketWithObjects(Client, bucketName); + + // Clean up temp directory + if (Directory.Exists(tempDirectory)) + { + try + { + Directory.Delete(tempDirectory, recursive: true); + } + catch + { + // Best effort cleanup + } + } + + BaseClean(); + } + + [TestCleanup] + public void TestCleanup() + { + // Clean up any test directories after each test + if (Directory.Exists(tempDirectory)) + { + foreach (var subDir in Directory.GetDirectories(tempDirectory)) + { + try + { + Directory.Delete(subDir, recursive: true); + } + catch + { + // Best effort cleanup + } + } + } + } + + #region Basic Download Tests + + [TestMethod] + [TestCategory("S3")] + [TestCategory("DownloadDirectory")] + public async Task DownloadDirectoryWithResponse_BasicDownload_ReturnsCorrectResponse() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("basic-download"); + var downloadPath = Path.Combine(tempDirectory, keyPrefix + "-download"); + var fileCount = 5; + + // Upload test directory + await UploadTestDirectory(keyPrefix, 2 * MB, fileCount); + + // Act + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityDownloadDirectoryRequest + { + BucketName = bucketName, + S3Directory = keyPrefix, + LocalDirectory = downloadPath + }; + + var response = await transferUtility.DownloadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsNotNull(response, "Response should not be null"); + Assert.AreEqual(fileCount, response.ObjectsDownloaded, "ObjectsDownloaded should match file count"); + + // Verify all files were downloaded + var downloadedFiles = Directory.GetFiles(downloadPath, "*", SearchOption.AllDirectories); + Assert.AreEqual(fileCount, downloadedFiles.Length, "Downloaded file count should match"); + + // Verify no temp files remain + VerifyNoTempFilesExist(downloadPath); + } + + [TestMethod] + [TestCategory("S3")] + [TestCategory("DownloadDirectory")] + public async Task DownloadDirectoryWithResponse_EmptyDirectory_ReturnsZeroObjectsDownloaded() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("empty-directory"); + var downloadPath = Path.Combine(tempDirectory, keyPrefix + "-download"); + + // Act - Download non-existent directory + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityDownloadDirectoryRequest + { + BucketName = bucketName, + S3Directory = keyPrefix, + LocalDirectory = downloadPath + }; + + var response = await transferUtility.DownloadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsNotNull(response, "Response should not be null"); + Assert.AreEqual(0, 
response.ObjectsDownloaded, "ObjectsDownloaded should be 0 for empty directory"); + + // Directory may or may not exist, but should have no files + if (Directory.Exists(downloadPath)) + { + var downloadedFiles = Directory.GetFiles(downloadPath, "*", SearchOption.AllDirectories); + Assert.AreEqual(0, downloadedFiles.Length, "No files should be downloaded"); + } + } + + #endregion + + #region Progress Tracking Tests + + [TestMethod] + [TestCategory("S3")] + [TestCategory("DownloadDirectory")] + public async Task DownloadDirectoryWithResponse_WithProgressTracking_FiresProgressEvents() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("progress-tracking"); + var downloadPath = Path.Combine(tempDirectory, keyPrefix + "-download"); + var fileCount = 3; + + await UploadTestDirectory(keyPrefix, 5 * MB, fileCount); + + var progressEvents = new List(); + var progressLock = new object(); + + // Act + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityDownloadDirectoryRequest + { + BucketName = bucketName, + S3Directory = keyPrefix, + LocalDirectory = downloadPath + }; + + request.DownloadedDirectoryProgressEvent += (sender, args) => + { + lock (progressLock) + { + progressEvents.Add(args); + } + }; + + var response = await transferUtility.DownloadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsNotNull(response, "Response should not be null"); + Assert.AreEqual(fileCount, response.ObjectsDownloaded); + Assert.IsTrue(progressEvents.Count > 0, "Progress events should have fired"); + + // Verify final progress event + var finalEvent = progressEvents.Last(); + Assert.AreEqual(fileCount, finalEvent.NumberOfFilesDownloaded); + Assert.AreEqual(fileCount, finalEvent.TotalNumberOfFiles); + Assert.AreEqual(finalEvent.TransferredBytes, finalEvent.TotalBytes); + } + + [TestMethod] + [TestCategory("S3")] + [TestCategory("DownloadDirectory")] + public async Task DownloadDirectoryWithResponse_SequentialMode_IncludesCurrentFileDetails() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("sequential-progress"); + var downloadPath = Path.Combine(tempDirectory, keyPrefix + "-download"); + + await UploadTestDirectory(keyPrefix, 3 * MB, 3); + + var progressEvents = new List(); + + // Act + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityDownloadDirectoryRequest + { + BucketName = bucketName, + S3Directory = keyPrefix, + LocalDirectory = downloadPath, + DownloadFilesConcurrently = false // Sequential mode + }; + + request.DownloadedDirectoryProgressEvent += (sender, args) => + { + progressEvents.Add(args); + }; + + var response = await transferUtility.DownloadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual(3, response.ObjectsDownloaded); + + // In sequential mode, should have CurrentFile populated + var eventsWithFile = progressEvents.Where(e => e.CurrentFile != null).ToList(); + Assert.IsTrue(eventsWithFile.Count > 0, "Should have events with CurrentFile populated"); + + foreach (var evt in eventsWithFile) + { + Assert.IsNotNull(evt.CurrentFile); + Assert.IsTrue(evt.TotalNumberOfBytesForCurrentFile > 0); + } + } + + [TestMethod] + [TestCategory("S3")] + [TestCategory("DownloadDirectory")] + public async Task DownloadDirectoryWithResponse_ConcurrentMode_OmitsCurrentFileDetails() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("concurrent-progress"); + var downloadPath = Path.Combine(tempDirectory, keyPrefix + "-download"); + + await 
UploadTestDirectory(keyPrefix, 3 * MB, 4); + + var progressEvents = new List(); + var progressLock = new object(); + + // Act + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityDownloadDirectoryRequest + { + BucketName = bucketName, + S3Directory = keyPrefix, + LocalDirectory = downloadPath, + DownloadFilesConcurrently = true // Concurrent mode + }; + + request.DownloadedDirectoryProgressEvent += (sender, args) => + { + lock (progressLock) + { + progressEvents.Add(args); + } + }; + + var response = await transferUtility.DownloadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual(4, response.ObjectsDownloaded); + Assert.IsTrue(progressEvents.Count > 0); + + // In concurrent mode, CurrentFile should be null + foreach (var evt in progressEvents) + { + Assert.IsNull(evt.CurrentFile, "CurrentFile should be null in concurrent mode"); + Assert.AreEqual(0, evt.TransferredBytesForCurrentFile); + Assert.AreEqual(0, evt.TotalNumberOfBytesForCurrentFile); + } + } + + #endregion + + #region Multipart Download Tests + + [TestMethod] + [TestCategory("S3")] + [TestCategory("DownloadDirectory")] + [TestCategory("Multipart")] + public async Task DownloadDirectoryWithResponse_WithMultipartFiles_DownloadsSuccessfully() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("multipart-directory"); + var downloadPath = Path.Combine(tempDirectory, keyPrefix + "-download"); + var fileCount = 3; + + // Upload directory with large files to trigger multipart (>16MB threshold) + await UploadTestDirectory(keyPrefix, 20 * MB, fileCount); + + // Act + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityDownloadDirectoryRequest + { + BucketName = bucketName, + S3Directory = keyPrefix, + LocalDirectory = downloadPath + }; + + var response = await transferUtility.DownloadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual(fileCount, response.ObjectsDownloaded); + + // Verify all files downloaded with correct sizes + var downloadedFiles = Directory.GetFiles(downloadPath, "*", SearchOption.AllDirectories); + Assert.AreEqual(fileCount, downloadedFiles.Length); + + foreach (var file in downloadedFiles) + { + var fileInfo = new FileInfo(file); + Assert.AreEqual(20 * MB, fileInfo.Length, $"File {file} should be 20MB"); + } + + VerifyNoTempFilesExist(downloadPath); + } + + #endregion + + #region Nested Directory Tests + + [TestMethod] + [TestCategory("S3")] + [TestCategory("DownloadDirectory")] + public async Task DownloadDirectoryWithResponse_NestedDirectories_PreservesStructure() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("nested-structure"); + var downloadPath = Path.Combine(tempDirectory, keyPrefix + "-download"); + + // Upload nested directory structure + var nestedFiles = new Dictionary + { + { "level1/file1.txt", 1 * MB }, + { "level1/level2/file2.txt", 2 * MB }, + { "level1/level2/level3/file3.txt", 3 * MB } + }; + + await UploadTestFilesWithStructure(keyPrefix, nestedFiles); + + // Act + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityDownloadDirectoryRequest + { + BucketName = bucketName, + S3Directory = keyPrefix, + LocalDirectory = downloadPath + }; + + var response = await transferUtility.DownloadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual(nestedFiles.Count, response.ObjectsDownloaded); + + // Verify directory structure + foreach (var 
kvp in nestedFiles) + { + var expectedPath = Path.Combine(downloadPath, kvp.Key.Replace('/', Path.DirectorySeparatorChar)); + Assert.IsTrue(File.Exists(expectedPath), $"File should exist: {expectedPath}"); + + var fileInfo = new FileInfo(expectedPath); + Assert.AreEqual(kvp.Value, fileInfo.Length, $"File size should match: {expectedPath}"); + } + } + + #endregion + + #region Concurrent vs Sequential Tests + + [TestMethod] + [TestCategory("S3")] + [TestCategory("DownloadDirectory")] + public async Task DownloadDirectoryWithResponse_ConcurrentMode_DownloadsAllFiles() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("concurrent-download"); + var downloadPath = Path.Combine(tempDirectory, keyPrefix + "-download"); + var fileCount = 10; + + await UploadTestDirectory(keyPrefix, 2 * MB, fileCount); + + // Act + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityDownloadDirectoryRequest + { + BucketName = bucketName, + S3Directory = keyPrefix, + LocalDirectory = downloadPath, + DownloadFilesConcurrently = true + }; + + var response = await transferUtility.DownloadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual(fileCount, response.ObjectsDownloaded); + + var downloadedFiles = Directory.GetFiles(downloadPath, "*", SearchOption.AllDirectories); + Assert.AreEqual(fileCount, downloadedFiles.Length); + } + + [TestMethod] + [TestCategory("S3")] + [TestCategory("DownloadDirectory")] + public async Task DownloadDirectoryWithResponse_SequentialMode_DownloadsAllFiles() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("sequential-download"); + var downloadPath = Path.Combine(tempDirectory, keyPrefix + "-download"); + var fileCount = 5; + + await UploadTestDirectory(keyPrefix, 3 * MB, fileCount); + + // Act + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityDownloadDirectoryRequest + { + BucketName = bucketName, + S3Directory = keyPrefix, + LocalDirectory = downloadPath, + DownloadFilesConcurrently = false + }; + + var response = await transferUtility.DownloadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual(fileCount, response.ObjectsDownloaded); + + var downloadedFiles = Directory.GetFiles(downloadPath, "*", SearchOption.AllDirectories); + Assert.AreEqual(fileCount, downloadedFiles.Length); + } + + #endregion + + #region Mixed File Size Tests + + [TestMethod] + [TestCategory("S3")] + [TestCategory("DownloadDirectory")] + public async Task DownloadDirectoryWithResponse_MixedFileSizes_DownloadsAll() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("mixed-sizes"); + var downloadPath = Path.Combine(tempDirectory, keyPrefix + "-download"); + + var mixedFiles = new Dictionary + { + { "tiny.txt", 100 }, // 100 bytes + { "small.txt", 512 * KB }, // 512 KB + { "medium.txt", 5 * MB }, // 5 MB + { "large.txt", 20 * MB } // 20 MB (multipart) + }; + + await UploadTestFilesWithStructure(keyPrefix, mixedFiles); + + // Act + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityDownloadDirectoryRequest + { + BucketName = bucketName, + S3Directory = keyPrefix, + LocalDirectory = downloadPath + }; + + var response = await transferUtility.DownloadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual(mixedFiles.Count, response.ObjectsDownloaded); + + // Verify each file's size + foreach (var kvp in mixedFiles) + { + var filePath = 
Path.Combine(downloadPath, kvp.Key); + Assert.IsTrue(File.Exists(filePath), $"File should exist: {filePath}"); + + var fileInfo = new FileInfo(filePath); + Assert.AreEqual(kvp.Value, fileInfo.Length, $"File size should match: {filePath}"); + } + } + + #endregion + + #region Helper Methods + + /// + /// Uploads a test directory with specified number of files using TransferUtility.UploadDirectory + /// + private static async Task UploadTestDirectory(string keyPrefix, long fileSize, int fileCount) + { + // Create local temp directory structure + var tempUploadDir = Path.Combine(Path.GetTempPath(), "upload-" + Guid.NewGuid().ToString()); + Directory.CreateDirectory(tempUploadDir); + + try + { + // Generate files locally + for (int i = 0; i < fileCount; i++) + { + var fileName = $"file{i}.dat"; + var localPath = Path.Combine(tempUploadDir, fileName); + UtilityMethods.GenerateFile(localPath, fileSize); + } + + // Upload entire directory using TransferUtility + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityUploadDirectoryRequest + { + BucketName = bucketName, + Directory = tempUploadDir, + KeyPrefix = keyPrefix, + SearchPattern = "*.dat", // Only match test data files, not system files + SearchOption = SearchOption.AllDirectories + }; + + await transferUtility.UploadDirectoryAsync(request); + } + finally + { + // Cleanup temp directory + if (Directory.Exists(tempUploadDir)) + { + try + { + Directory.Delete(tempUploadDir, recursive: true); + } + catch + { + // Best effort cleanup + } + } + } + } + + /// + /// Uploads test files with specific structure using TransferUtility.UploadDirectory + /// + private static async Task UploadTestFilesWithStructure(string keyPrefix, Dictionary files) + { + // Create local temp directory structure + var tempUploadDir = Path.Combine(Path.GetTempPath(), "upload-struct-" + Guid.NewGuid().ToString()); + + try + { + // Generate files with directory structure locally + foreach (var kvp in files) + { + var localPath = Path.Combine(tempUploadDir, kvp.Key.Replace('/', Path.DirectorySeparatorChar)); + var directory = Path.GetDirectoryName(localPath); + + if (!string.IsNullOrEmpty(directory)) + { + Directory.CreateDirectory(directory); + } + + UtilityMethods.GenerateFile(localPath, kvp.Value); + } + + // Upload entire directory using TransferUtility + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityUploadDirectoryRequest + { + BucketName = bucketName, + Directory = tempUploadDir, + KeyPrefix = keyPrefix, + SearchPattern = "*", + SearchOption = SearchOption.AllDirectories + }; + + await transferUtility.UploadDirectoryAsync(request); + } + finally + { + // Cleanup temp directory + if (Directory.Exists(tempUploadDir)) + { + try + { + Directory.Delete(tempUploadDir, recursive: true); + } + catch + { + // Best effort cleanup + } + } + } + } + + /// + /// Verifies that no temporary files remain after download completion. + /// Temp files use the pattern: {originalPath}.s3tmp.{8-char-id} + /// + private static void VerifyNoTempFilesExist(string directoryPath) + { + if (Directory.Exists(directoryPath)) + { + var tempFiles = Directory.GetFiles(directoryPath, "*.s3tmp.*", SearchOption.AllDirectories); + Assert.AreEqual(0, tempFiles.Length, + $"No temporary files should remain. 
Found: {string.Join(", ", tempFiles)}");
+            }
+        }
+
+        #endregion
+    }
+}
diff --git a/sdk/test/Services/S3/UnitTests/Custom/DownloadDirectoryCommandTests.cs b/sdk/test/Services/S3/UnitTests/Custom/DownloadDirectoryCommandTests.cs
new file mode 100644
index 000000000000..4a813eca10ae
--- /dev/null
+++ b/sdk/test/Services/S3/UnitTests/Custom/DownloadDirectoryCommandTests.cs
@@ -0,0 +1,994 @@
+using Amazon.S3;
+using Amazon.S3.Model;
+using Amazon.S3.Transfer;
+using Amazon.S3.Transfer.Internal;
+using Microsoft.VisualStudio.TestTools.UnitTesting;
+using Moq;
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Linq;
+using System.Threading;
+using System.Threading.Tasks;
+
+namespace AWSSDK.UnitTests
+{
+    [TestClass]
+    public class DownloadDirectoryCommandTests
+    {
+        private string _testDirectory;
+        private Mock<IAmazonS3> _mockS3Client;
+        private TransferUtilityConfig _config;
+
+        [TestInitialize]
+        public void Setup()
+        {
+            _testDirectory = MultipartDownloadTestHelpers.CreateTempDirectory();
+            _mockS3Client = new Mock<IAmazonS3>();
+            _config = new TransferUtilityConfig
+            {
+                ConcurrentServiceRequests = 4
+            };
+
+            // Setup default S3 client config
+            var s3Config = new AmazonS3Config
+            {
+                BufferSize = 8192,
+            };
+            _mockS3Client.Setup(c => c.Config).Returns(s3Config);
+        }
+
+        [TestCleanup]
+        public void Cleanup()
+        {
+            MultipartDownloadTestHelpers.CleanupTempDirectory(_testDirectory);
+        }
+
+        #region Constructor Tests
+
+        [TestMethod]
+        public void Constructor_WithValidParameters_CreatesCommand()
+        {
+            // Arrange
+            var request = CreateDownloadDirectoryRequest();
+
+            // Act
+            var command = new DownloadDirectoryCommand(_mockS3Client.Object, request);
+
+            // Assert
+            Assert.IsNotNull(command);
+        }
+
+        [TestMethod]
+        public void Constructor_WithUseMultipartDownload_CreatesCommand()
+        {
+            // Arrange
+            var request = CreateDownloadDirectoryRequest();
+
+            // Act
+            var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, useMultipartDownload: true);
+
+            // Assert
+            Assert.IsNotNull(command);
+        }
+
+        [TestMethod]
+        public void Constructor_WithConfigAndMultipart_CreatesCommand()
+        {
+            // Arrange
+            var request = CreateDownloadDirectoryRequest();
+
+            // Act
+            var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: true);
+
+            // Assert
+            Assert.IsNotNull(command);
+        }
+
+        [TestMethod]
+        [ExpectedException(typeof(ArgumentNullException))]
+        public void Constructor_WithNullS3Client_ThrowsArgumentNullException()
+        {
+            // Arrange
+            var request = CreateDownloadDirectoryRequest();
+
+            // Act
+            var command = new DownloadDirectoryCommand(null, request);
+        }
+
+        [TestMethod]
+        [ExpectedException(typeof(ArgumentNullException))]
+        public void Constructor_WithNullRequest_ThrowsArgumentNullException()
+        {
+            // Act
+            var command = new DownloadDirectoryCommand(_mockS3Client.Object, null);
+        }
+
+        #endregion
+
+        #region ValidateRequest Tests
+
+        [TestMethod]
+        [ExpectedException(typeof(InvalidOperationException))]
+        public async Task ExecuteAsync_WithMissingBucketName_ThrowsInvalidOperationException()
+        {
+            // Arrange
+            var request = CreateDownloadDirectoryRequest();
+            request.BucketName = null;
+            var command = new DownloadDirectoryCommand(_mockS3Client.Object, request);
+
+            // Act
+            await command.ExecuteAsync(CancellationToken.None);
+        }
+
+        [TestMethod]
+        [ExpectedException(typeof(InvalidOperationException))]
+        public async Task ExecuteAsync_WithEmptyBucketName_ThrowsInvalidOperationException()
+        {
+            // Arrange
+            var request =
CreateDownloadDirectoryRequest(); + request.BucketName = ""; + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request); + + // Act + await command.ExecuteAsync(CancellationToken.None); + } + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public async Task ExecuteAsync_WithMissingS3Directory_ThrowsInvalidOperationException() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + request.S3Directory = null; + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request); + + // Act + await command.ExecuteAsync(CancellationToken.None); + } + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public async Task ExecuteAsync_WithEmptyS3Directory_ThrowsInvalidOperationException() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + request.S3Directory = ""; + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request); + + // Act + await command.ExecuteAsync(CancellationToken.None); + } + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public async Task ExecuteAsync_WithMissingLocalDirectory_ThrowsInvalidOperationException() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + request.LocalDirectory = null; + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request); + + // Act + await command.ExecuteAsync(CancellationToken.None); + } + + #endregion + + #region ExecuteAsync Tests - Empty Directory + + [TestMethod] + public async Task ExecuteAsync_EmptyDirectory_ReturnsZeroObjectsDownloaded() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + SetupEmptyDirectoryListing(); + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: false); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual(0, response.ObjectsDownloaded); + } + + [TestMethod] + public async Task ExecuteAsync_EmptyDirectoryWithMultipart_ReturnsZeroObjectsDownloaded() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + SetupEmptyDirectoryListing(); + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: true); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual(0, response.ObjectsDownloaded); + } + + #endregion + + #region ExecuteAsync Tests - Single File + + [TestMethod] + public async Task ExecuteAsync_SingleFile_DownloadsSuccessfully() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + var fileSize = 1024; + SetupSingleFileDirectoryListing("test-file.txt", fileSize); + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: false); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual(1, response.ObjectsDownloaded); + + var downloadedFile = Path.Combine(_testDirectory, "test-file.txt"); + Assert.IsTrue(File.Exists(downloadedFile)); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyFileSize(downloadedFile, fileSize)); + } + + [TestMethod] + public async Task ExecuteAsync_SingleFileWithMultipart_DownloadsSuccessfully() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + var fileSize = 1024; + SetupSingleFileDirectoryListing("test-file.txt", fileSize, setupForMultipart: true); + 
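+            // setupForMultipart makes the mocked GetObject responses advertise PartsCount >= 2
+            // (SetupGetObjectForFile forces at least two parts), so the part-based download path
+            // is exercised even though this file is only 1 KB.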
var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: true); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual(1, response.ObjectsDownloaded); + + var downloadedFile = Path.Combine(_testDirectory, "test-file.txt"); + Assert.IsTrue(File.Exists(downloadedFile)); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyFileSize(downloadedFile, fileSize)); + } + + #endregion + + #region ExecuteAsync Tests - Multiple Files + + [TestMethod] + public async Task ExecuteAsync_MultipleFiles_DownloadsAll() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + request.DownloadFilesConcurrently = false; // Sequential for predictable testing + + var files = new Dictionary + { + { "file1.txt", 512 }, + { "file2.txt", 1024 }, + { "file3.txt", 2048 } + }; + + SetupMultipleFilesDirectoryListing(files); + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: false); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual(files.Count, response.ObjectsDownloaded); + + foreach (var file in files) + { + var downloadedFile = Path.Combine(_testDirectory, file.Key); + Assert.IsTrue(File.Exists(downloadedFile), $"File {file.Key} should exist"); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyFileSize(downloadedFile, file.Value), + $"File {file.Key} should have size {file.Value}"); + } + } + + [TestMethod] + public async Task ExecuteAsync_MultipleFilesWithMultipart_DownloadsAll() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + request.DownloadFilesConcurrently = false; // Sequential for predictable testing + + var files = new Dictionary + { + { "large1.dat", 10 * 1024 * 1024 }, // 10MB + { "large2.dat", 15 * 1024 * 1024 } // 15MB + }; + + SetupMultipleFilesDirectoryListing(files, setupForMultipart: true); + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: true); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual(files.Count, response.ObjectsDownloaded); + } + + [TestMethod] + public async Task ExecuteAsync_MultipleFilesConcurrent_DownloadsAll() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + request.DownloadFilesConcurrently = true; // Concurrent downloads + + var files = new Dictionary + { + { "file1.txt", 512 }, + { "file2.txt", 1024 }, + { "file3.txt", 2048 }, + { "file4.txt", 4096 } + }; + + SetupMultipleFilesDirectoryListing(files); + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: false); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual(files.Count, response.ObjectsDownloaded); + + foreach (var file in files) + { + var downloadedFile = Path.Combine(_testDirectory, file.Key); + Assert.IsTrue(File.Exists(downloadedFile), $"File {file.Key} should exist"); + } + } + + #endregion + + #region ExecuteAsync Tests - Nested Directories + + [TestMethod] + public async Task ExecuteAsync_NestedDirectories_CreatesStructure() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + request.DownloadFilesConcurrently = false; + + var files = new Dictionary + { + { "level1/file1.txt", 512 }, + { 
"level1/level2/file2.txt", 1024 }, + { "level1/level2/level3/file3.txt", 2048 } + }; + + SetupMultipleFilesDirectoryListing(files); + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: false); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual(files.Count, response.ObjectsDownloaded); + + foreach (var file in files) + { + var downloadedFile = Path.Combine(_testDirectory, file.Key.Replace('/', Path.DirectorySeparatorChar)); + Assert.IsTrue(File.Exists(downloadedFile), $"File {file.Key} should exist at {downloadedFile}"); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyFileSize(downloadedFile, file.Value)); + } + } + + #endregion + + #region ExecuteAsync Tests - Cancellation + + [TestMethod] + public async Task ExecuteAsync_WithCancelledToken_ThrowsOperationCanceledException() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + SetupSingleFileDirectoryListing("test.txt", 1024); + + var cts = new CancellationTokenSource(); + cts.Cancel(); + + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: false); + + // Act & Assert + try + { + await command.ExecuteAsync(cts.Token); + Assert.Fail("Expected an OperationCanceledException to be thrown"); + } + catch (OperationCanceledException) + { + // Expected - TaskCanceledException inherits from OperationCanceledException + // This is the correct behavior + } + } + + [TestMethod] + public async Task ExecuteAsync_CancellationDuringDownload_CleansUpProperly() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + var files = new Dictionary + { + { "file1.txt", 512 }, + { "file2.txt", 1024 } + }; + + var cts = new CancellationTokenSource(); + + // Setup to cancel after first file starts downloading + var callCount = 0; + _mockS3Client.Setup(c => c.ListObjectsAsync( + It.IsAny(), + It.IsAny())) + .ReturnsAsync(() => CreateListObjectsResponse(files)); + + _mockS3Client.Setup(c => c.GetObjectAsync( + It.IsAny(), + It.IsAny())) + .Callback(() => + { + callCount++; + if (callCount == 1) + cts.Cancel(); + }) + .ThrowsAsync(new OperationCanceledException()); + + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: false); + + // Act + try + { + await command.ExecuteAsync(cts.Token); + } + catch (OperationCanceledException) + { + // Expected + } + + // Assert - partial files should be cleaned up + await Task.Delay(100); // Give cleanup time to complete + } + + #endregion + + #region ExecuteAsync Tests - Edge Cases + + [TestMethod] + public async Task ExecuteAsync_DirectoryMarkers_SkipsDirectoryObjects() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + + // Include directory markers (keys ending with /) + var listResponse = new ListObjectsResponse + { + S3Objects = new List + { + new S3Object { Key = "prefix/subdir/", Size = 0 }, + new S3Object { Key = "prefix/file.txt", Size = 1024 } + } + }; + + _mockS3Client.Setup(c => c.ListObjectsAsync( + It.IsAny(), + It.IsAny())) + .ReturnsAsync(listResponse); + + SetupGetObjectForFile("prefix/file.txt", 1024); + + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: false); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.AreEqual(1, response.ObjectsDownloaded); // Only the file, not the directory marker + } + + 
[TestMethod] + public async Task ExecuteAsync_ExistingFiles_OverwritesCorrectly() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + var fileName = "existing-file.txt"; + var filePath = Path.Combine(_testDirectory, fileName); + + // Create existing file with old content + var oldData = MultipartDownloadTestHelpers.GenerateTestData(512, 999); + File.WriteAllBytes(filePath, oldData); + + var newFileSize = 1024; + SetupSingleFileDirectoryListing(fileName, newFileSize); + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: false); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.AreEqual(1, response.ObjectsDownloaded); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyFileSize(filePath, newFileSize)); + + // Verify content was overwritten + var newData = File.ReadAllBytes(filePath); + Assert.AreNotEqual(oldData.Length, newData.Length); + } + + #endregion + + #region Progress Tracking Tests + + [TestMethod] + public async Task ExecuteAsync_SingleFileMultipart_FiresProgressEvents() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + var progressEvents = new List(); + + request.DownloadedDirectoryProgressEvent += (sender, args) => + { + progressEvents.Add(args); + }; + + var fileSize = 10 * 1024 * 1024; // 10MB + SetupSingleFileDirectoryListing("test.dat", fileSize, setupForMultipart: true); + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: true); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsTrue(progressEvents.Count > 0, "Should fire progress events"); + + // Verify final event + var finalEvent = progressEvents.Last(); + Assert.AreEqual(1, finalEvent.NumberOfFilesDownloaded, "Should have downloaded 1 file"); + Assert.AreEqual(1, finalEvent.TotalNumberOfFiles, "Should have 1 total file"); + Assert.AreEqual(fileSize, finalEvent.TransferredBytes, "All bytes should be transferred"); + Assert.AreEqual(fileSize, finalEvent.TotalBytes, "Total bytes should match file size"); + } + + [TestMethod] + public async Task ExecuteAsync_MultipleFilesMultipart_AggregatesProgressCorrectly() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + request.DownloadFilesConcurrently = false; // Sequential for predictable testing + + var progressEvents = new List(); + request.DownloadedDirectoryProgressEvent += (sender, args) => + { + progressEvents.Add(args); + }; + + var files = new Dictionary + { + { "file1.dat", 5 * 1024 * 1024 }, // 5MB + { "file2.dat", 10 * 1024 * 1024 } // 10MB + }; + + var totalBytes = files.Values.Sum(); + SetupMultipleFilesDirectoryListing(files, setupForMultipart: true); + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: true); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsTrue(progressEvents.Count > 0, "Should fire progress events"); + + var finalEvent = progressEvents.Last(); + Assert.AreEqual(2, finalEvent.NumberOfFilesDownloaded, "Should have downloaded 2 files"); + Assert.AreEqual(2, finalEvent.TotalNumberOfFiles, "Should have 2 total files"); + Assert.AreEqual(totalBytes, finalEvent.TransferredBytes, "All bytes should be transferred"); + Assert.AreEqual(totalBytes, finalEvent.TotalBytes, "Total bytes should match sum of all files"); + + // Verify progress increases monotonically + long lastTransferred = 
0; + foreach (var evt in progressEvents) + { + Assert.IsTrue(evt.TransferredBytes >= lastTransferred, + "TransferredBytes should never decrease"); + lastTransferred = evt.TransferredBytes; + } + } + + [TestMethod] + public async Task ExecuteAsync_ConcurrentMultipart_FiresProgressCorrectly() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + request.DownloadFilesConcurrently = true; // Concurrent + + var progressEvents = new List(); + var progressLock = new object(); + + request.DownloadedDirectoryProgressEvent += (sender, args) => + { + lock (progressLock) + { + progressEvents.Add(args); + } + }; + + var files = new Dictionary + { + { "file1.dat", 8 * 1024 * 1024 }, // 8MB + { "file2.dat", 8 * 1024 * 1024 }, // 8MB + { "file3.dat", 8 * 1024 * 1024 } // 8MB + }; + + var totalBytes = files.Values.Sum(); + SetupMultipleFilesDirectoryListing(files, setupForMultipart: true); + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: true); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsTrue(progressEvents.Count > 0, "Should fire progress events"); + + // Verify monotonic increase in transferred bytes despite concurrent execution + long lastTransferred = 0; + foreach (var evt in progressEvents) + { + Assert.IsTrue(evt.TransferredBytes >= lastTransferred, + "TransferredBytes should never decrease even in concurrent mode"); + lastTransferred = evt.TransferredBytes; + } + + var finalEvent = progressEvents.Last(); + Assert.AreEqual(3, finalEvent.NumberOfFilesDownloaded, "Should have downloaded 3 files"); + Assert.AreEqual(totalBytes, finalEvent.TransferredBytes, "All bytes should be transferred"); + } + + [TestMethod] + public async Task ExecuteAsync_ConcurrentMode_OmitsCurrentFileDetails() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + request.DownloadFilesConcurrently = true; + + var progressEvents = new List(); + var progressLock = new object(); + + request.DownloadedDirectoryProgressEvent += (sender, args) => + { + lock (progressLock) + { + progressEvents.Add(args); + } + }; + + SetupSingleFileDirectoryListing("test.dat", 8 * 1024 * 1024, setupForMultipart: true); + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: true); + + // Act + await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsTrue(progressEvents.Count > 0, "Should fire progress events"); + + // In concurrent mode, current file details should be null/zero + foreach (var evt in progressEvents) + { + Assert.IsNull(evt.CurrentFile, "CurrentFile should be null in concurrent mode"); + Assert.AreEqual(0, evt.TransferredBytesForCurrentFile, + "TransferredBytesForCurrentFile should be 0 in concurrent mode"); + Assert.AreEqual(0, evt.TotalNumberOfBytesForCurrentFile, + "TotalNumberOfBytesForCurrentFile should be 0 in concurrent mode"); + } + } + + [TestMethod] + public async Task ExecuteAsync_SequentialMode_IncludesCurrentFileDetails() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + request.DownloadFilesConcurrently = false; // Sequential + + var progressEvents = new List(); + request.DownloadedDirectoryProgressEvent += (sender, args) => + { + progressEvents.Add(args); + }; + + var fileSize = 5 * 1024 * 1024; // 5MB + SetupSingleFileDirectoryListing("test-file.dat", fileSize, setupForMultipart: true); + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, 
useMultipartDownload: true); + + // Act + await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsTrue(progressEvents.Count > 0, "Should fire progress events"); + + // In sequential mode, current file details should be populated + var eventsWithFile = progressEvents.Where(e => e.CurrentFile != null).ToList(); + Assert.IsTrue(eventsWithFile.Count > 0, "Should have events with CurrentFile populated"); + + foreach (var evt in eventsWithFile) + { + Assert.AreEqual("test-file.dat", evt.CurrentFile, "CurrentFile should be set"); + Assert.IsTrue(evt.TotalNumberOfBytesForCurrentFile > 0, + "TotalNumberOfBytesForCurrentFile should be greater than 0"); + } + + // Verify final event has complete file details + var finalEvent = progressEvents.Last(); + Assert.AreEqual("test-file.dat", finalEvent.CurrentFile); + Assert.AreEqual(fileSize, finalEvent.TotalNumberOfBytesForCurrentFile); + } + + [TestMethod] + public async Task ExecuteAsync_MultipleFilesSequential_TracksPerFileProgress() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + request.DownloadFilesConcurrently = false; + + var progressEvents = new List(); + request.DownloadedDirectoryProgressEvent += (sender, args) => + { + progressEvents.Add(args); + }; + + var files = new Dictionary + { + { "small.dat", 2 * 1024 * 1024 }, // 2MB + { "medium.dat", 5 * 1024 * 1024 }, // 5MB + { "large.dat", 10 * 1024 * 1024 } // 10MB + }; + + SetupMultipleFilesDirectoryListing(files, setupForMultipart: true); + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: true); + + // Act + await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsTrue(progressEvents.Count > 0, "Should fire progress events"); + + // Verify we see progress for each file + var filesTracked = progressEvents + .Where(e => e.CurrentFile != null) + .Select(e => e.CurrentFile) + .Distinct() + .ToList(); + + Assert.AreEqual(3, filesTracked.Count, "Should track progress for all 3 files"); + Assert.IsTrue(filesTracked.Contains("small.dat"), "Should track small.dat"); + Assert.IsTrue(filesTracked.Contains("medium.dat"), "Should track medium.dat"); + Assert.IsTrue(filesTracked.Contains("large.dat"), "Should track large.dat"); + } + + [TestMethod] + public async Task ExecuteAsync_ProgressEventsCancellation_StopsProgressTracking() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + request.DownloadFilesConcurrently = false; + + var progressEvents = new List(); + var cts = new CancellationTokenSource(); + + request.DownloadedDirectoryProgressEvent += (sender, args) => + { + progressEvents.Add(args); + // Cancel after first progress event + if (progressEvents.Count == 1) + { + cts.Cancel(); + } + }; + + var files = new Dictionary + { + { "file1.dat", 5 * 1024 * 1024 }, + { "file2.dat", 5 * 1024 * 1024 } + }; + + SetupMultipleFilesDirectoryListing(files, setupForMultipart: true); + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: true); + + // Act & Assert + try + { + await command.ExecuteAsync(cts.Token); + } + catch (OperationCanceledException) + { + // Expected + } + + // Verify we got at least one progress event before cancellation + Assert.IsTrue(progressEvents.Count >= 1, "Should have fired at least one progress event"); + } + + #endregion + + #region Helper Methods + + private TransferUtilityDownloadDirectoryRequest CreateDownloadDirectoryRequest( + string bucketName = "test-bucket", + string s3Directory = "prefix", + 
string localDirectory = null) + { + localDirectory = localDirectory ?? _testDirectory; + + return new TransferUtilityDownloadDirectoryRequest + { + BucketName = bucketName, + S3Directory = s3Directory, + LocalDirectory = localDirectory + }; + } + + private void SetupEmptyDirectoryListing() + { + var listResponse = new ListObjectsResponse + { + S3Objects = new List() + }; + + _mockS3Client.Setup(c => c.ListObjectsAsync( + It.IsAny(), + It.IsAny())) + .ReturnsAsync(listResponse); + } + + private void SetupSingleFileDirectoryListing(string fileName, long fileSize, bool setupForMultipart = false) + { + var files = new Dictionary { { fileName, fileSize } }; + SetupMultipleFilesDirectoryListing(files, setupForMultipart); + } + + private void SetupMultipleFilesDirectoryListing(Dictionary files, bool setupForMultipart = false) + { + var listResponse = CreateListObjectsResponse(files); + + _mockS3Client.Setup(c => c.ListObjectsAsync( + It.IsAny(), + It.IsAny())) + .ReturnsAsync(listResponse); + + // Setup GetObject for each file + foreach (var file in files) + { + SetupGetObjectForFile($"prefix/{file.Key}", file.Value, setupForMultipart); + } + } + + private ListObjectsResponse CreateListObjectsResponse(Dictionary files) + { + var s3Objects = files.Select(f => new S3Object + { + Key = $"prefix/{f.Key}", + Size = f.Value + }).ToList(); + + return new ListObjectsResponse + { + S3Objects = s3Objects + }; + } + + private void SetupGetObjectForFile(string key, long fileSize, bool setupForMultipart = false) + { + var data = MultipartDownloadTestHelpers.GenerateTestData((int)fileSize, 0); + + if (setupForMultipart) + { + // For multipart downloads using PART strategy, we need to: + // 1. First request (PartNumber=1) returns PartsCount > 1 + // 2. Subsequent requests for each part + + var partsCount = (int)Math.Ceiling((double)fileSize / (8 * 1024 * 1024)); // 8MB parts + if (partsCount < 2) partsCount = 2; // Force multipart for testing + + var partSize = fileSize / partsCount; + var lastPartSize = fileSize - (partSize * (partsCount - 1)); + + // Setup first part request (discovery) + var firstPartData = MultipartDownloadTestHelpers.GenerateTestData((int)partSize, 0); + var firstPartResponse = new GetObjectResponse + { + BucketName = "test-bucket", + Key = key, + ContentLength = partSize, + ResponseStream = new MemoryStream(firstPartData), + ContentRange = $"bytes 0-{partSize - 1}/{fileSize}", + ETag = "\"test-etag\"", + PartsCount = partsCount + }; + + _mockS3Client.Setup(c => c.GetObjectAsync( + It.Is(r => r.Key == key && r.PartNumber == 1), + It.IsAny())) + .ReturnsAsync(() => + { + // Return new stream each time to avoid disposed stream issues + var newData = MultipartDownloadTestHelpers.GenerateTestData((int)partSize, 0); + return new GetObjectResponse + { + BucketName = "test-bucket", + Key = key, + ContentLength = partSize, + ResponseStream = new MemoryStream(newData), + ContentRange = $"bytes 0-{partSize - 1}/{fileSize}", + ETag = "\"test-etag\"", + PartsCount = partsCount + }; + }); + + // Setup subsequent part requests + for (int i = 2; i <= partsCount; i++) + { + var partNum = i; + var currentPartSize = (partNum == partsCount) ? 
 lastPartSize : partSize;
+                    var startByte = (partNum - 1) * partSize;
+                    var endByte = startByte + currentPartSize - 1;
+
+                    _mockS3Client.Setup(c => c.GetObjectAsync(
+                        It.Is<GetObjectRequest>(r => r.Key == key && r.PartNumber == partNum),
+                        It.IsAny<CancellationToken>()))
+                        .ReturnsAsync(() =>
+                        {
+                            var partData = MultipartDownloadTestHelpers.GenerateTestData((int)currentPartSize, (int)startByte);
+                            return new GetObjectResponse
+                            {
+                                BucketName = "test-bucket",
+                                Key = key,
+                                ContentLength = currentPartSize,
+                                ResponseStream = new MemoryStream(partData),
+                                ContentRange = $"bytes {startByte}-{endByte}/{fileSize}",
+                                ETag = "\"test-etag\"",
+                                PartsCount = partsCount
+                            };
+                        });
+                }
+            }
+            else
+            {
+                // For non-multipart (simple) downloads
+                var response = new GetObjectResponse
+                {
+                    BucketName = "test-bucket",
+                    Key = key,
+                    ContentLength = fileSize,
+                    ResponseStream = new MemoryStream(data),
+                    ETag = "\"test-etag\""
+                };
+
+                _mockS3Client.Setup(c => c.GetObjectAsync(
+                    It.Is<GetObjectRequest>(r => r.Key == key),
+                    It.IsAny<CancellationToken>()))
+                    .ReturnsAsync(() =>
+                    {
+                        // Return new stream each time to avoid disposed stream issues
+                        var newData = MultipartDownloadTestHelpers.GenerateTestData((int)fileSize, 0);
+                        return new GetObjectResponse
+                        {
+                            BucketName = "test-bucket",
+                            Key = key,
+                            ContentLength = fileSize,
+                            ResponseStream = new MemoryStream(newData),
+                            ETag = "\"test-etag\""
+                        };
+                    });
+            }
+        }
+
+        #endregion
+    }
+}
diff --git a/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadCommandTests.cs b/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadCommandTests.cs
index bacc470411b9..b0b0ebba9f5e 100644
--- a/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadCommandTests.cs
+++ b/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadCommandTests.cs
@@ -91,6 +91,42 @@ public void Constructor_WithNullConfig_ThrowsArgumentNullException()
             var command = new MultipartDownloadCommand(_mockS3Client.Object, request, null);
         }
 
+        [TestMethod]
+        public void Constructor_WithSharedHttpThrottler_CreatesCommand()
+        {
+            // Arrange
+            var request = MultipartDownloadTestHelpers.CreateDownloadRequest(
+                filePath: Path.Combine(_testDirectory, "test.dat"));
+            var sharedThrottler = new SemaphoreSlim(10);
+
+            try
+            {
+                // Act
+                var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config, sharedThrottler);
+
+                // Assert
+                Assert.IsNotNull(command);
+            }
+            finally
+            {
+                sharedThrottler.Dispose();
+            }
+        }
+
+        [TestMethod]
+        public void Constructor_WithNullSharedHttpThrottler_CreatesCommand()
+        {
+            // Arrange
+            var request = MultipartDownloadTestHelpers.CreateDownloadRequest(
+                filePath: Path.Combine(_testDirectory, "test.dat"));
+
+            // Act
+            var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config, sharedHttpThrottler: null);
+
+            // Assert
+            Assert.IsNotNull(command);
+        }
+
         #endregion
 
         #region ValidateRequest Tests
@@ -743,6 +779,158 @@ public async Task Integration_NestedDirectory_CreatesDirectoryAndDownloads()
 
         #endregion
 
+        #region Shared HTTP Throttler Tests
+
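The invariant the tests in this region assert — the shared SemaphoreSlim's CurrentCount returns to its initial value whether the download succeeds or throws — is the standard wait/release-in-finally discipline. A minimal sketch of that pattern follows (a hypothetical helper for illustration, not the command's actual code):

using System;
using System.Threading;
using System.Threading.Tasks;

static class ThrottleExample
{
    static async Task<T> WithThrottleAsync<T>(
        SemaphoreSlim throttler,
        Func<Task<T>> operation,
        CancellationToken cancellationToken)
    {
        // Acquire one HTTP concurrency slot; honors cancellation while waiting.
        await throttler.WaitAsync(cancellationToken).ConfigureAwait(false);
        try
        {
            return await operation().ConfigureAwait(false);
        }
        finally
        {
            // Runs on success, exception, or cancellation after acquisition,
            // so CurrentCount always returns to its pre-call value.
            throttler.Release();
        }
    }
}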
+        [TestMethod]
+        public async Task ExecuteAsync_WithSharedHttpThrottler_CompletesSuccessfully()
+        {
+            // Arrange
+            var destinationPath = Path.Combine(_testDirectory, "throttled-download.dat");
+            var request = MultipartDownloadTestHelpers.CreateDownloadRequest(
+                filePath: destinationPath);
+
+            var fileSize = 1024;
+            SetupSuccessfulSinglePartDownload(fileSize);
+
+            var sharedThrottler = new SemaphoreSlim(10);
+            try
+            {
+                var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config, sharedThrottler);
+
+                // Act
+                var response = await command.ExecuteAsync(CancellationToken.None);
+
+                // Assert
+                Assert.IsNotNull(response);
+                Assert.IsTrue(File.Exists(destinationPath));
+                Assert.IsTrue(MultipartDownloadTestHelpers.VerifyFileSize(destinationPath, fileSize));
+            }
+            finally
+            {
+                sharedThrottler.Dispose();
+            }
+        }
+
+        [TestMethod]
+        public async Task ExecuteAsync_WithoutSharedHttpThrottler_CompletesSuccessfully()
+        {
+            // Arrange
+            var destinationPath = Path.Combine(_testDirectory, "no-throttler-download.dat");
+            var request = MultipartDownloadTestHelpers.CreateDownloadRequest(
+                filePath: destinationPath);
+
+            var fileSize = 1024;
+            SetupSuccessfulSinglePartDownload(fileSize);
+
+            var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config, sharedHttpThrottler: null);
+
+            // Act
+            var response = await command.ExecuteAsync(CancellationToken.None);
+
+            // Assert
+            Assert.IsNotNull(response);
+            Assert.IsTrue(File.Exists(destinationPath));
+            Assert.IsTrue(MultipartDownloadTestHelpers.VerifyFileSize(destinationPath, fileSize));
+        }
+
+        [TestMethod]
+        public async Task ExecuteAsync_SharedHttpThrottler_DoesNotBlockSinglePartDownload()
+        {
+            // Arrange
+            var destinationPath = Path.Combine(_testDirectory, "single-part-throttled.dat");
+            var request = MultipartDownloadTestHelpers.CreateDownloadRequest(
+                filePath: destinationPath);
+
+            var fileSize = 512; // Small file (single part)
+            SetupSuccessfulSinglePartDownload(fileSize);
+
+            // Create throttler with limited capacity
+            var sharedThrottler = new SemaphoreSlim(1);
+            try
+            {
+                var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config, sharedThrottler);
+
+                // Act
+                var response = await command.ExecuteAsync(CancellationToken.None);
+
+                // Assert
+                Assert.IsNotNull(response);
+                Assert.IsTrue(File.Exists(destinationPath));
+
+                // Verify throttler was not exhausted (single part doesn't use it heavily)
+                Assert.AreEqual(1, sharedThrottler.CurrentCount);
+            }
+            finally
+            {
+                sharedThrottler.Dispose();
+            }
+        }
+
+        [TestMethod]
+        public async Task ExecuteAsync_SharedHttpThrottler_ReleasedOnSuccess()
+        {
+            // Arrange
+            var destinationPath = Path.Combine(_testDirectory, "throttler-released.dat");
+            var request = MultipartDownloadTestHelpers.CreateDownloadRequest(
+                filePath: destinationPath);
+
+            var fileSize = 1024;
+            SetupSuccessfulSinglePartDownload(fileSize);
+
+            var sharedThrottler = new SemaphoreSlim(5);
+            var initialCount = sharedThrottler.CurrentCount;
+
+            try
+            {
+                var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config, sharedThrottler);
+
+                // Act
+                await command.ExecuteAsync(CancellationToken.None);
+
+                // Assert - throttler should be back to initial state
+                Assert.AreEqual(initialCount, sharedThrottler.CurrentCount);
+            }
+            finally
+            {
+                sharedThrottler.Dispose();
+            }
+        }
+
+        [TestMethod]
+        public async Task ExecuteAsync_SharedHttpThrottler_ReleasedOnException()
+        {
+            // Arrange
+            var destinationPath = Path.Combine(_testDirectory, "throttler-released-error.dat");
+            var request = MultipartDownloadTestHelpers.CreateDownloadRequest(
+                filePath: destinationPath);
+
+            _mockS3Client.Setup(c => c.GetObjectAsync(
+                It.IsAny<GetObjectRequest>(),
+                It.IsAny<CancellationToken>()))
+                .ThrowsAsync(new AmazonS3Exception("Test exception"));
+
+            var sharedThrottler = new SemaphoreSlim(5);
+            var initialCount = sharedThrottler.CurrentCount;
+
+            try
+            {
+                var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config, sharedThrottler);
+
+                // Act & Assert
+                await Assert.ThrowsExceptionAsync<AmazonS3Exception>(
+                    async () => await
command.ExecuteAsync(CancellationToken.None)); + + // Throttler should be back to initial state even after exception + Assert.AreEqual(initialCount, sharedThrottler.CurrentCount); + } + finally + { + sharedThrottler.Dispose(); + } + } + + #endregion + #region Helper Methods private void SetupSuccessfulSinglePartDownload( From dd150cbf9a8d1410783f852300a13b077de3b131 Mon Sep 17 00:00:00 2001 From: Garrett Beatty Date: Mon, 1 Dec 2025 17:33:51 -0500 Subject: [PATCH 31/56] Optimize part streaming (#4162) --- .../Internal/BufferedMultipartStream.cs | 10 +- .../Internal/BufferedPartDataHandler.cs | 191 +++- .../Transfer/Internal/FilePartDataHandler.cs | 38 +- .../Transfer/Internal/IPartBufferManager.cs | 7 + .../Internal/MultipartDownloadManager.cs | 49 +- .../Transfer/Internal/PartBufferManager.cs | 40 +- .../Transfer/Internal/StreamingDataSource.cs | 230 +++++ .../Custom/BufferedPartDataHandlerTests.cs | 859 ++++++++++++------ .../Custom/PartBufferManagerTests.cs | 412 ++++++++- .../Custom/StreamingDataSourceTests.cs | 708 +++++++++++++++ 10 files changed, 2185 insertions(+), 359 deletions(-) create mode 100644 sdk/src/Services/S3/Custom/Transfer/Internal/StreamingDataSource.cs create mode 100644 sdk/test/Services/S3/UnitTests/Custom/StreamingDataSourceTests.cs diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedMultipartStream.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedMultipartStream.cs index f5085a197eea..1f9f2aef5688 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedMultipartStream.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedMultipartStream.cs @@ -282,6 +282,8 @@ private void ThrowIfDisposed() /// /// This method disposes the underlying and , /// which in turn cleans up any buffered part data and returns ArrayPool buffers to the pool. + /// It also disposes the InitialResponse from the discovery result, which contains the HTTP connection + /// and network stream that must be explicitly disposed to return the connection to the pool. /// [SuppressMessage("Design", "CA1031:Do not catch general exception types", Justification = "Dispose methods should not throw exceptions")] protected override void Dispose(bool disposing) @@ -290,7 +292,13 @@ protected override void Dispose(bool disposing) { try { - // Dispose modular dependencies + // Dispose InitialResponse first (contains HTTP connection and network stream) + // This is critical because GetObjectResponse holds unmanaged resources that + // won't be cleaned up by GC - must be explicitly disposed to return HTTP + // connection to the pool and close network streams + _discoveryResult?.InitialResponse?.Dispose(); + + // Then dispose modular dependencies _downloadCoordinator?.Dispose(); _partBufferManager?.Dispose(); } diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedPartDataHandler.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedPartDataHandler.cs index 02fb974c7f72..33e8a7f9816d 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedPartDataHandler.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedPartDataHandler.cs @@ -30,9 +30,26 @@ namespace Amazon.S3.Transfer.Internal { /// - /// Buffers downloaded parts in memory using and . - /// Implements current streaming behavior for multipart downloads. + /// Handles multipart download data with intelligent stream-vs-buffer decision making. 
+ /// Optimizes for sequential part arrival by streaming directly to consumer when possible, + /// while buffering out-of-order parts into memory using . /// + /// + /// Optimization Strategy: + /// + /// Parts arriving in expected order (matching NextExpectedPartNumber) stream directly to consumer + /// Out-of-order parts buffer into ArrayPool memory for later sequential consumption + /// Best case: All parts in order → zero buffering → pure streaming + /// Worst case: All parts out of order → full buffering (original behavior) + /// + /// + /// + /// Response Ownership: + /// + /// Streaming: StreamingDataSource takes ownership and disposes after reading + /// Buffering: Handler disposes response immediately after buffering completes + /// + /// internal class BufferedPartDataHandler : IPartDataHandler { private readonly IPartBufferManager _partBufferManager; @@ -64,28 +81,163 @@ public Task PrepareAsync(DownloadDiscoveryResult discoveryResult, CancellationTo } /// + /// + /// + /// Intelligently chooses between streaming and buffering based on part arrival order: + /// + /// + /// If partNumber matches NextExpectedPartNumber: Stream directly (no buffering) + /// Otherwise: Buffer into memory for later sequential consumption + /// + /// Response Ownership: + /// + /// This method takes ownership of the response and is responsible for disposing it in ALL cases, + /// including error scenarios. Callers must NOT dispose the response after calling this method. + /// + /// public async Task ProcessPartAsync( int partNumber, GetObjectResponse response, CancellationToken cancellationToken) { - Logger.DebugFormat("BufferedPartDataHandler: [Part {0}] Starting to buffer part from response stream - ContentLength={1}", - partNumber, response.ContentLength); + if (partNumber == _partBufferManager.NextExpectedPartNumber) + { + await ProcessStreamingPartAsync(partNumber, response, cancellationToken).ConfigureAwait(false); + } + else + { + await ProcessBufferedPartAsync(partNumber, response, cancellationToken).ConfigureAwait(false); + } + } + + /// + /// Processes a part that arrives in expected order by streaming it directly without buffering. + /// Takes ownership of the response and transfers it to the StreamingDataSource. + /// + /// The part number being processed. + /// The GetObjectResponse containing the part data. Ownership is transferred to StreamingDataSource. + /// Cancellation token for the operation. + /// + /// This method is called when the part arrives in the expected sequential order, allowing + /// for optimal zero-copy streaming directly to the consumer without buffering into memory. + /// + /// OWNERSHIP TRANSFER: + /// 1. Response is passed to StreamingDataSource constructor (StreamingDataSource takes ownership) + /// 2. StreamingDataSource is added to buffer manager (buffer manager takes ownership) + /// 3. After successful AddBufferAsync, we null out our reference to mark ownership transfer + /// 4. 
Buffer manager will dispose StreamingDataSource (which disposes response) during cleanup + /// + /// ERROR HANDLING: + /// - If StreamingDataSource constructor fails: We dispose the response (still our responsibility) + /// - If constructor succeeds but AddBufferAsync fails: StreamingDataSource.Dispose() handles the response + /// - If AddBufferAsync succeeds: Buffer manager owns everything and will clean up + /// + private async Task ProcessStreamingPartAsync( + int partNumber, + GetObjectResponse response, + CancellationToken cancellationToken) + { + Logger.DebugFormat("BufferedPartDataHandler: [Part {0}] Matches NextExpectedPartNumber - streaming directly without buffering", + partNumber); - // Buffer the part from the response stream into memory - var buffer = await BufferPartFromResponseAsync( - partNumber, - response, - cancellationToken).ConfigureAwait(false); + StreamingDataSource streamingDataSource = null; + var ownsResponse = true; // Track if we still own the response + + try + { + // Create a StreamingDataSource that will stream directly from the response + // If successful, StreamingDataSource takes ownership of the response and will dispose it + streamingDataSource = new StreamingDataSource(partNumber, response); + ownsResponse = false; // Ownership transferred to StreamingDataSource - Logger.DebugFormat("BufferedPartDataHandler: [Part {0}] Buffered {1} bytes into memory", - partNumber, buffer.Length); + // Add the streaming data source to the buffer manager + // After this succeeds, the buffer manager owns the data source + _partBufferManager.AddBuffer(streamingDataSource); - // Add the buffered part to the buffer manager - _partBufferManager.AddBuffer(buffer); + // Mark ownership transfer by nulling our reference + // If ReleaseBufferSpace() throws, we no longer own the data source, so we won't dispose it + streamingDataSource = null; - Logger.DebugFormat("BufferedPartDataHandler: [Part {0}] Added to buffer manager", - partNumber); + // Release capacity immediately since we're not holding anything in memory + _partBufferManager.ReleaseBufferSpace(); + + Logger.DebugFormat("BufferedPartDataHandler: [Part {0}] StreamingDataSource added and capacity released", + partNumber); + } + catch (Exception ex) + { + Logger.Error(ex, "BufferedPartDataHandler: [Part {0}] Failed to process streaming part", partNumber); + + // Dispose response if we still own it (constructor failed before taking ownership) + if (ownsResponse) + response?.Dispose(); + + // Dispose StreamingDataSource if we created it but buffer manager doesn't own it yet + // If null, the buffer manager owns it and will handle cleanup + streamingDataSource?.Dispose(); + + throw; + } + } + + /// + /// Processes a part that arrives out of order by buffering it into memory. + /// Takes ownership of the response and disposes it after buffering completes. + /// + /// The part number being processed. + /// The GetObjectResponse containing the part data. This method owns and disposes it. + /// Cancellation token for the operation. + /// + /// This method is called when the part arrives out of the expected sequential order. + /// The part data is buffered into ArrayPool memory for later sequential consumption. 
+ /// + /// OWNERSHIP: + /// - Response is read and buffered into StreamPartBuffer + /// - Response is disposed immediately after buffering (no longer needed) + /// - StreamPartBuffer is added to buffer manager (buffer manager takes ownership) + /// - Buffer manager will dispose StreamPartBuffer during cleanup + /// + /// ERROR HANDLING: + /// - Always dispose response in catch block since we own it throughout this method + /// - BufferPartFromResponseAsync handles its own cleanup of StreamPartBuffer on error + /// + private async Task ProcessBufferedPartAsync( + int partNumber, + GetObjectResponse response, + CancellationToken cancellationToken) + { + Logger.DebugFormat("BufferedPartDataHandler: [Part {0}] Out of order (NextExpected={1}) - buffering to memory", + partNumber, _partBufferManager.NextExpectedPartNumber); + + try + { + // Buffer the part from the response stream into memory + var buffer = await BufferPartFromResponseAsync( + partNumber, + response, + cancellationToken).ConfigureAwait(false); + + // Response has been fully read and buffered - dispose it now + response?.Dispose(); + + Logger.DebugFormat("BufferedPartDataHandler: [Part {0}] Buffered {1} bytes into memory", + partNumber, buffer.Length); + + // Add the buffered part to the buffer manager + _partBufferManager.AddBuffer(buffer); + + Logger.DebugFormat("BufferedPartDataHandler: [Part {0}] Added to buffer manager (capacity will be released after consumption)", + partNumber); + } + catch (Exception ex) + { + Logger.Error(ex, "BufferedPartDataHandler: [Part {0}] Failed to process buffered part", partNumber); + + // We own the response throughout this method, so dispose it on error + response?.Dispose(); + + throw; + } } /// @@ -112,6 +264,15 @@ public void Dispose() // _partBufferManager is owned by caller, don't dispose } + /// + /// Buffers a part from the GetObjectResponse stream into ArrayPool memory. + /// Used when a part arrives out of order and cannot be streamed directly. + /// + /// The part number being buffered. + /// The GetObjectResponse containing the part data stream. + /// Cancellation token for the operation. + /// A containing the buffered part data. + /// Thrown when buffering fails. The StreamPartBuffer will be disposed automatically. private async Task BufferPartFromResponseAsync( int partNumber, GetObjectResponse response, diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/FilePartDataHandler.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/FilePartDataHandler.cs index 09e043ee8729..1d1e4452b311 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/FilePartDataHandler.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/FilePartDataHandler.cs @@ -69,26 +69,42 @@ public Task PrepareAsync(DownloadDiscoveryResult discoveryResult, CancellationTo } /// + /// + /// Response Ownership: + /// + /// This method takes ownership of the response and is responsible for disposing it in ALL cases, + /// including error scenarios. Callers must NOT dispose the response after calling this method. 
+ /// + /// public async Task ProcessPartAsync( int partNumber, GetObjectResponse response, CancellationToken cancellationToken) { - Logger.DebugFormat("FilePartDataHandler: [Part {0}] Starting to process part - ContentLength={1}", - partNumber, response.ContentLength); + try + { + Logger.DebugFormat("FilePartDataHandler: [Part {0}] Starting to process part - ContentLength={1}", + partNumber, response.ContentLength); - // Calculate offset for this part based on ContentRange or part number - long offset = GetPartOffset(response, partNumber); + // Calculate offset for this part based on ContentRange or part number + long offset = GetPartOffset(response, partNumber); - Logger.DebugFormat("FilePartDataHandler: [Part {0}] Calculated file offset={1}", - partNumber, offset); + Logger.DebugFormat("FilePartDataHandler: [Part {0}] Calculated file offset={1}", + partNumber, offset); - // Write part data to file at the calculated offset - await WritePartToFileAsync(offset, response, cancellationToken) - .ConfigureAwait(false); + // Write part data to file at the calculated offset + await WritePartToFileAsync(offset, response, cancellationToken) + .ConfigureAwait(false); - Logger.DebugFormat("FilePartDataHandler: [Part {0}] File write completed successfully", - partNumber); + Logger.DebugFormat("FilePartDataHandler: [Part {0}] File write completed successfully", + partNumber); + } + finally + { + // Always dispose response after writing to disk (success or failure) + // This releases the HTTP connection back to the pool + response?.Dispose(); + } } /// diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/IPartBufferManager.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/IPartBufferManager.cs index 9675c60b321e..5f5c214421b5 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/IPartBufferManager.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/IPartBufferManager.cs @@ -50,6 +50,13 @@ internal interface IPartBufferManager : IDisposable /// The downloaded part buffer to add. void AddBuffer(StreamPartBuffer buffer); + /// + /// Adds a part data source (streaming or buffered) and signals readers when next expected part arrives. + /// + /// The part data source to add (can be StreamingDataSource or BufferedDataSource). + /// A task that completes when the data source has been added and signaling is complete. + void AddBuffer(IPartDataSource dataSource); + /// /// Reads data from the buffer manager. Automatically handles sequential part consumption /// and reads across part boundaries to fill the buffer when possible, matching standard Stream.Read() behavior. diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs index f56b45195e06..273e8c905092 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs @@ -231,20 +231,26 @@ public async Task StartDownloadsAsync(DownloadDiscoveryResult discoveryResult, E ? 
 new EventHandler<WriteObjectProgressArgs>(DownloadPartProgressEventCallback) : null;
 
-            // Attach progress callback to Part 1's response if provided
-            if (wrappedCallback != null)
+            try
             {
-                discoveryResult.InitialResponse.WriteObjectProgressEvent += wrappedCallback;
+                // Attach progress callback to Part 1's response if provided
+                if (wrappedCallback != null)
+                {
+                    discoveryResult.InitialResponse.WriteObjectProgressEvent += wrappedCallback;
+                }
+
+                // Process Part 1 from InitialResponse (applies to both single-part and multipart)
+                Logger.DebugFormat("MultipartDownloadManager: Buffering Part 1 from discovery response");
+                await _dataHandler.ProcessPartAsync(1, discoveryResult.InitialResponse, cancellationToken).ConfigureAwait(false);
             }
-
-            // Process Part 1 from InitialResponse (applies to both single-part and multipart)
-            Logger.DebugFormat("MultipartDownloadManager: Buffering Part 1 from discovery response");
-            await _dataHandler.ProcessPartAsync(1, discoveryResult.InitialResponse, cancellationToken).ConfigureAwait(false);
-
-            // Detach the event handler after processing to prevent memory leak
-            if (wrappedCallback != null)
+            finally
             {
-                discoveryResult.InitialResponse.WriteObjectProgressEvent -= wrappedCallback;
+                // Always detach the event handler to prevent memory leak
+                // This runs whether ProcessPartAsync succeeds or throws
+                if (wrappedCallback != null)
+                {
+                    discoveryResult.InitialResponse.WriteObjectProgressEvent -= wrappedCallback;
+                }
             }
 
             if (discoveryResult.IsSinglePart)
@@ -347,6 +353,7 @@ private async Task CreateDownloadTaskAsync(int partNumber, long objectSize, Even
             Logger.DebugFormat("MultipartDownloadManager: [Part {0}] Buffer space acquired", partNumber);
 
             GetObjectResponse response = null;
+            var ownsResponse = false; // Track if we still own the response
 
             try
             {
@@ -393,6 +400,7 @@
             }
 
             response = await _s3Client.GetObjectAsync(getObjectRequest, cancellationToken).ConfigureAwait(false);
+            ownsResponse = true; // We now own the response
 
             // Attach progress callback to response if provided
             if (progressCallback != null)
@@ -424,25 +432,30 @@
                 partNumber, _httpConcurrencySlots.CurrentCount, _config.ConcurrentServiceRequests);
             }
 
-            Logger.DebugFormat("MultipartDownloadManager: [Part {0}] Starting buffering", partNumber);
+            Logger.DebugFormat("MultipartDownloadManager: [Part {0}] Processing part (handler will decide: stream or buffer)", partNumber);
 
             // Delegate data handling to the handler
+            // IMPORTANT: Handler takes ownership of response and is responsible for disposing it in ALL cases:
+            // - If streaming: StreamingDataSource takes ownership and disposes when consumer finishes reading
+            // - If buffering: Handler disposes immediately after copying data to buffer
+            // - On error: Handler disposes in its catch block before rethrowing
             await _dataHandler.ProcessPartAsync(partNumber, response, cancellationToken).ConfigureAwait(false);
+            ownsResponse = false; // Ownership transferred to handler
 
-            Logger.DebugFormat("MultipartDownloadManager: [Part {0}] Buffering completed successfully", partNumber);
+            Logger.DebugFormat("MultipartDownloadManager: [Part {0}] Processing completed successfully", partNumber);
         }
         catch (Exception ex)
         {
             Logger.Error(ex, "MultipartDownloadManager: [Part {0}] Download failed", partNumber);
+
+            // Dispose response if we still own it (error occurred before handler took ownership)
+            if (ownsResponse)
+                response?.Dispose();
+
+            // Release capacity on failure
             _dataHandler.ReleaseCapacity();
             throw;
         }
-        finally
-        {
-            // Always dispose the response since we never transfer ownership
-            response?.Dispose();
-        }
     }
diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/PartBufferManager.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/PartBufferManager.cs
index c679fcb91f9e..16baf6644384 100644
--- a/sdk/src/Services/S3/Custom/Transfer/Internal/PartBufferManager.cs
+++ b/sdk/src/Services/S3/Custom/Transfer/Internal/PartBufferManager.cs
@@ -37,13 +37,12 @@ namespace Amazon.S3.Transfer.Internal
     ///
     /// SYNCHRONIZATION PRIMITIVES AND THEIR PURPOSES:
     ///
-    /// 1. _nextExpectedPartNumber (int)
+    /// 1. _nextExpectedPartNumber (volatile int)
     ///    - Purpose: Tracks which part to read next, ensuring sequential consumption
-    ///    - Synchronization: None required - only accessed by the consumer thread
-    ///    - Updates: Simple increment (++) after consuming each part
-    ///    - Reads: Direct reads are safe - int reads are naturally atomic
-    ///    - Why no synchronization needed: Producer threads never access this field,
-    ///      only the single consumer thread reads and writes it sequentially
+    ///    - Synchronization: volatile keyword for memory visibility across threads
+    ///    - Readers: Producer threads (download tasks) check if their part matches to decide stream-vs-buffer
+    ///    - Writer: Consumer thread (single) increments after consuming each part
+    ///    - Thread safety: volatile ensures producer threads see latest value (prevents stale cached reads)
     ///
     /// 2. _completionState (volatile tuple of bool and Exception)
     ///    - Purpose: Atomically tracks download completion status and any error
@@ -84,7 +83,7 @@ namespace Amazon.S3.Transfer.Internal
     ///       - Example: With MaxInMemoryParts=10, if parts 5-14 are buffered, the task downloading
     ///         part 15 blocks here until the reader consumes and releases part 5's buffer
     ///    2. Read part data from S3 into pooled buffer
-    ///    3. Add buffered part:
+    ///    3. Add buffered part: await
     ///       - Adds buffer to _partDataSources dictionary
     ///       - Signals _partAvailable to wake consumer if waiting
     ///    4. Consumer eventually releases the buffer slot after reading the part
@@ -147,16 +146,15 @@ internal class PartBufferManager : IPartBufferManager
         private readonly AutoResetEvent _partAvailable;
 
         // Tracks the next part number to consume sequentially. Ensures in-order reading.
-        // SYNCHRONIZATION: None required - only accessed by the consumer thread
-        // Consumer advances this after fully consuming each part with simple increment.
+        // SYNCHRONIZATION: volatile keyword for memory visibility
+        // - Consumer thread writes: Increments after fully consuming each part
+        // - Producer threads read: Check if their part matches to decide stream-vs-buffer
+        // - volatile ensures all threads see the most recent value (prevents stale cached reads)
+        //
         // Example: Set to 1 initially. After reading part 1, incremented to 2.
        // Even if part 5 is available, consumer waits for part 2 before proceeding.
-        //
-        // Why no synchronization:
-        // - Only the consumer thread (calling ReadAsync) ever reads or writes this field
-        // - Producer threads (download tasks) never access it - they only write to the dictionary
-        // - No concurrent access means no need for volatile, Interlocked, or locks
-        private int _nextExpectedPartNumber = 1;
+        private volatile int _nextExpectedPartNumber = 1;
 
         // Stores download completion status and any error as an atomic unit.
// SYNCHRONIZATION: volatile keyword + atomic reference assignment @@ -298,6 +296,18 @@ public void AddBuffer(StreamPartBuffer buffer) AddDataSource(bufferedSource); } + /// + public void AddBuffer(IPartDataSource dataSource) + { + ThrowIfDisposed(); + + if (dataSource == null) + throw new ArgumentNullException(nameof(dataSource)); + + // Delegate directly to AddDataSourceAsync which already handles IPartDataSource + AddDataSource(dataSource); + } + /// public async Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) { diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/StreamingDataSource.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/StreamingDataSource.cs new file mode 100644 index 000000000000..d203f27d0c61 --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/StreamingDataSource.cs @@ -0,0 +1,230 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ +using System; +using System.Diagnostics.CodeAnalysis; +using System.IO; +using System.Threading; +using System.Threading.Tasks; +using Amazon.Runtime.Internal.Util; +using Amazon.S3.Model; + +namespace Amazon.S3.Transfer.Internal +{ + /// + /// Stream-based data source that reads directly from GetObjectResponse without buffering. + /// Provides pass-through access to the response stream for optimal memory efficiency when parts arrive in order. + /// + /// + /// This class enables direct streaming optimization for multipart downloads. When a part arrives + /// and happens to be the next expected part in the sequence, we can bypass buffering entirely + /// and stream the response directly to the consumer. 
+ /// + /// OWNERSHIP AND LIFECYCLE: + /// - Takes ownership of the GetObjectResponse and its stream + /// - Responsible for disposing the response (releases HTTP connection) + /// - Consumer reads directly from response stream via ReadAsync + /// - Must be disposed to release network resources + /// + /// THREAD SAFETY: + /// - Designed for single-threaded consumption by PartBufferManager + /// - PartBufferManager guarantees sequential access to each part + /// - No internal synchronization needed + /// + /// COMPLETION TRACKING: + /// - Tracks bytes read vs ContentLength to detect completion + /// - Sets IsComplete when stream exhausted OR expected bytes reached + /// - Handles both normal completion and premature stream closure + /// + internal class StreamingDataSource : IPartDataSource + { + private readonly GetObjectResponse _response; + private readonly Stream _responseStream; + private readonly long _expectedBytes; + private readonly int _partNumber; + private long _bytesRead; + private bool _isComplete; + private bool _disposed; + + #region Logger + + private Logger Logger + { + get + { + return Logger.GetLogger(typeof(TransferUtility)); + } + } + + #endregion + + /// + public int PartNumber + { + get + { + ThrowIfDisposed(); + return _partNumber; + } + } + + /// + public bool IsComplete + { + get + { + ThrowIfDisposed(); + return _isComplete; + } + } + + /// + /// Initializes a new instance of the class. + /// Takes ownership of the GetObjectResponse and its stream. + /// + /// The 1-based part number this source represents. + /// The GetObjectResponse containing the stream to read from. Ownership is transferred. + /// Thrown when is null. + /// + /// CRITICAL: This constructor takes ownership of the response. The caller must NOT dispose it. + /// The StreamingDataSource will dispose the response when it is disposed. + /// + public StreamingDataSource(int partNumber, GetObjectResponse response) + { + if (response == null) + throw new ArgumentNullException(nameof(response)); + + _partNumber = partNumber; + _response = response; + _responseStream = response.ResponseStream; + _expectedBytes = response.ContentLength; + _bytesRead = 0; + _isComplete = false; + + Logger.DebugFormat("StreamingDataSource: Created for part {0} (ExpectedBytes={1}, streaming directly from response)", + _partNumber, _expectedBytes); + } + + /// + /// + /// Reads directly from the underlying response stream without any buffering or copying. + /// This provides optimal memory efficiency and minimal latency for in-order part arrivals. + /// + /// COMPLETION DETECTION: + /// The source is marked complete when: + /// 1. Stream returns 0 bytes (normal EOF), OR + /// 2. We've read the expected number of bytes (ContentLength) + /// + /// ERROR HANDLING: + /// Any exceptions from the underlying stream (network errors, timeout, etc.) propagate directly + /// to the caller. The PartBufferManager will handle cleanup and error recovery. 
+ /// + public async Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) + { + ThrowIfDisposed(); + + if (buffer == null) + throw new ArgumentNullException(nameof(buffer)); + if (offset < 0) + throw new ArgumentOutOfRangeException(nameof(offset), "Offset must be non-negative"); + if (count < 0) + throw new ArgumentOutOfRangeException(nameof(count), "Count must be non-negative"); + if (offset + count > buffer.Length) + throw new ArgumentException("Offset and count exceed buffer bounds"); + + if (_isComplete) + { + Logger.DebugFormat("StreamingDataSource: [Part {0}] Already complete, returning 0 bytes", PartNumber); + return 0; + } + + try + { + Logger.DebugFormat("StreamingDataSource: [Part {0}] Reading up to {1} bytes from response stream (BytesRead={2}/{3})", + PartNumber, count, _bytesRead, _expectedBytes); + + // Direct delegation to response stream - no buffering, just pass-through + var bytesRead = await _responseStream.ReadAsync(buffer, offset, count, cancellationToken).ConfigureAwait(false); + + _bytesRead += bytesRead; + + Logger.DebugFormat("StreamingDataSource: [Part {0}] Read {1} bytes from response stream (TotalBytesRead={2}/{3})", + PartNumber, bytesRead, _bytesRead, _expectedBytes); + + // Mark complete when stream exhausted OR we've read expected bytes + if (bytesRead == 0 || _bytesRead >= _expectedBytes) + { + _isComplete = true; + Logger.DebugFormat("StreamingDataSource: [Part {0}] Marked complete (BytesRead=0: {1}, ReachedExpected: {2})", + PartNumber, bytesRead == 0, _bytesRead >= _expectedBytes); + } + + return bytesRead; + } + catch (Exception ex) + { + Logger.Error(ex, "StreamingDataSource: [Part {0}] Error reading from response stream: {1}", + PartNumber, ex.Message); + + // Mark as complete on error to prevent further read attempts + _isComplete = true; + throw; + } + } + + private void ThrowIfDisposed() + { + if (_disposed) + throw new ObjectDisposedException(nameof(StreamingDataSource)); + } + + /// + /// + /// CRITICAL: Disposes the GetObjectResponse, which releases the HTTP connection back to the connection pool. + /// Failure to dispose will cause connection leaks and eventual connection pool exhaustion. 
+        ///
+        [SuppressMessage("Design", "CA1031:Do not catch general exception types", Justification = "Dispose methods should not throw exceptions")]
+        public void Dispose()
+        {
+            if (!_disposed)
+            {
+                try
+                {
+                    Logger.DebugFormat("StreamingDataSource: [Part {0}] Disposing (Releasing HTTP connection, BytesRead={1}/{2})",
+                        PartNumber, _bytesRead, _expectedBytes);
+
+                    // Dispose the response - this releases the HTTP connection
+                    _response?.Dispose();
+                }
+                catch (Exception ex)
+                {
+                    Logger.Error(ex, "StreamingDataSource: [Part {0}] Error during disposal: {1}",
+                        PartNumber, ex.Message);
+
+                    // Suppressing CA1031: Dispose methods should not throw exceptions
+                    // Continue disposal process silently on any errors
+                }
+
+                _disposed = true;
+            }
+        }
+    }
+}
diff --git a/sdk/test/Services/S3/UnitTests/Custom/BufferedPartDataHandlerTests.cs b/sdk/test/Services/S3/UnitTests/Custom/BufferedPartDataHandlerTests.cs
index e7131cdc208d..6a98b8fbaba2 100644
--- a/sdk/test/Services/S3/UnitTests/Custom/BufferedPartDataHandlerTests.cs
+++ b/sdk/test/Services/S3/UnitTests/Custom/BufferedPartDataHandlerTests.cs
@@ -1,15 +1,20 @@
 using Amazon.S3.Model;
-using Amazon.S3.Transfer;
 using Amazon.S3.Transfer.Internal;
 using Microsoft.VisualStudio.TestTools.UnitTesting;
 using Moq;
 using System;
+using System.Buffers;
 using System.IO;
+using System.Collections.Generic;
 using System.Threading;
 using System.Threading.Tasks;
 
 namespace AWSSDK.UnitTests
 {
+    ///
+    /// Unit tests for BufferedPartDataHandler class.
+    /// Tests intelligent stream-vs-buffer decision making for multipart downloads.
+    ///
     [TestClass]
     public class BufferedPartDataHandlerTests
     {
 
         public void Constructor_WithValidParameters_CreatesHandler()
         {
             // Arrange
-            var mockBufferManager = new Mock<IPartBufferManager>();
             var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var mockBufferManager = new Mock<IPartBufferManager>();
 
             // Act
             var handler = new BufferedPartDataHandler(mockBufferManager.Object, config);
 
             // Assert
             Assert.IsNotNull(handler);
+
+            // Cleanup
+            handler.Dispose();
         }
 
         [TestMethod]
@@ -38,425 +46,649 @@
         [ExpectedException(typeof(ArgumentNullException))]
         public void Constructor_WithNullBufferManager_ThrowsArgumentNullException()
         {
             // Arrange
             var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
 
             // Act
             var handler = new BufferedPartDataHandler(null, config);
+
+            // Assert - ExpectedException
         }
 
         [TestMethod]
         [ExpectedException(typeof(ArgumentNullException))]
-        public void Constructor_WithNullConfig_ThrowsArgumentNullException()
+        public void Constructor_WithNullConfiguration_ThrowsArgumentNullException()
         {
             // Arrange
             var mockBufferManager = new Mock<IPartBufferManager>();
 
             // Act
             var handler = new BufferedPartDataHandler(mockBufferManager.Object, null);
+
+            // Assert - ExpectedException
         }
 
         #endregion
 
-        #region ProcessPartAsync Tests - Basic Functionality
+        #region ProcessPartAsync Tests - In-Order (Streaming Path)
 
         [TestMethod]
-        public async Task ProcessPartAsync_BuffersPartData()
+        public async Task ProcessPartAsync_InOrderPart_CreatesStreamingDataSource()
         {
             // Arrange
-            var partSize = 8 * 1024 * 1024; // 8MB
-            var partData = new byte[partSize];
-            new Random().NextBytes(partData);
-
-            var mockBufferManager = new Mock<IPartBufferManager>();
             var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var mockBufferManager = new Mock<IPartBufferManager>();
+            mockBufferManager.Setup(m => m.NextExpectedPartNumber).Returns(1);
+
+            IPartDataSource capturedDataSource = null;
+            mockBufferManager.Setup(m => m.AddBuffer(It.IsAny<IPartDataSource>()))
+                .Callback<IPartDataSource>((ds) => capturedDataSource = ds);
+
             var handler = new BufferedPartDataHandler(mockBufferManager.Object, config);
 
-            var response = new GetObjectResponse
-            {
-                ContentLength = partSize,
-                ResponseStream = new MemoryStream(partData)
-            };
+            try
+            {
+                var response = CreateMockGetObjectResponse(512);
 
-            // Act
-            await handler.ProcessPartAsync(1, response, CancellationToken.None);
+                // Act
+                await handler.ProcessPartAsync(1, response, CancellationToken.None);
+
+                // Assert
+                Assert.IsNotNull(capturedDataSource);
+                Assert.IsInstanceOfType(capturedDataSource, typeof(StreamingDataSource));
+                Assert.AreEqual(1, capturedDataSource.PartNumber);
 
-            // Assert - should add buffer to manager
-            mockBufferManager.Verify(
-                x => x.AddBuffer(It.IsAny<StreamPartBuffer>()),
-                Times.Once);
+                // Cleanup
+                capturedDataSource?.Dispose();
+            }
+            finally
+            {
+                handler.Dispose();
+            }
         }
 
         [TestMethod]
-        public async Task ProcessPartAsync_ReadsExactContentLength()
+        public async Task ProcessPartAsync_InOrderPart_ReleasesCapacityImmediately()
         {
             // Arrange
-            var partSize = 1024;
-            var partData = new byte[partSize];
-            new Random().NextBytes(partData);
-
-            StreamPartBuffer capturedBuffer = null;
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
             var mockBufferManager = new Mock<IPartBufferManager>();
-            mockBufferManager.Setup(x => x.AddBuffer(It.IsAny<StreamPartBuffer>()))
-                .Callback<StreamPartBuffer>((buffer) => capturedBuffer = buffer);
+            mockBufferManager.Setup(m => m.NextExpectedPartNumber).Returns(1);
+            mockBufferManager.Setup(m => m.AddBuffer(It.IsAny<IPartDataSource>()));
 
-            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
             var handler = new BufferedPartDataHandler(mockBufferManager.Object, config);
 
-            var response = new GetObjectResponse
+            try
             {
-                ContentLength = partSize,
-                ResponseStream = new MemoryStream(partData)
-            };
+                var response = CreateMockGetObjectResponse(512);
 
-            // Act
-            await handler.ProcessPartAsync(1, response, CancellationToken.None);
+                // Act
+                await handler.ProcessPartAsync(1, response, CancellationToken.None);
 
-            // Assert
-            Assert.IsNotNull(capturedBuffer);
-            Assert.AreEqual(partSize, capturedBuffer.Length);
-            Assert.AreEqual(1, capturedBuffer.PartNumber);
+                // Assert - ReleaseBufferSpace should be called (through ReleaseCapacity)
+                // Handler calls ReleaseBufferSpace directly, which eventually calls the manager's method
+                // We verify the AddBuffer was called with a StreamingDataSource
+                mockBufferManager.Verify(m => m.AddBuffer(
+                    It.Is<IPartDataSource>(ds => ds is StreamingDataSource)), Times.Once);
+            }
+            finally
+            {
+                handler.Dispose();
+            }
         }
 
         [TestMethod]
-        public async Task ProcessPartAsync_HandlesSmallPart()
+        public async Task ProcessPartAsync_InOrderPart_DoesNotDisposeResponse()
         {
             // Arrange
-            var partSize = 100; // Very small
-            var partData = new byte[partSize];
-            new Random().NextBytes(partData);
-
-            StreamPartBuffer capturedBuffer = null;
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
             var mockBufferManager = new Mock<IPartBufferManager>();
-            mockBufferManager.Setup(x => x.AddBuffer(It.IsAny<StreamPartBuffer>()))
-                .Callback<StreamPartBuffer>((buffer) => capturedBuffer = buffer);
+            mockBufferManager.Setup(m => m.NextExpectedPartNumber).Returns(1);
+            mockBufferManager.Setup(m => m.AddBuffer(It.IsAny<IPartDataSource>()));
 
-            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
             var handler = new BufferedPartDataHandler(mockBufferManager.Object, config);
 
-            var response = new GetObjectResponse
+            try
             {
-                ContentLength = partSize,
-                ResponseStream = new MemoryStream(partData)
-            };
+                var response = CreateMockGetObjectResponse(512);
 
-            // Act
-            await handler.ProcessPartAsync(1, response, CancellationToken.None);
+                // Act
+                await handler.ProcessPartAsync(1, response,
 CancellationToken.None);
 
-            // Assert
-            Assert.IsNotNull(capturedBuffer);
-            Assert.AreEqual(partSize, capturedBuffer.Length);
+                // Assert - Response stream should still be readable (not disposed)
+                // The StreamingDataSource now owns it and will dispose it later
+                Assert.IsTrue(response.ResponseStream.CanRead);
+            }
+            finally
+            {
+                handler.Dispose();
+            }
         }
 
         [TestMethod]
-        public async Task ProcessPartAsync_HandlesLargePart()
+        public async Task ProcessPartAsync_MultipleInOrderParts_AllStreamDirectly()
         {
             // Arrange
-            var partSize = 16 * 1024 * 1024; // 16MB
-            var partData = new byte[partSize];
-            new Random().NextBytes(partData);
-
-            StreamPartBuffer capturedBuffer = null;
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
             var mockBufferManager = new Mock<IPartBufferManager>();
-            mockBufferManager.Setup(x => x.AddBuffer(It.IsAny<StreamPartBuffer>()))
-                .Callback<StreamPartBuffer>((buffer) => capturedBuffer = buffer);
+            var streamingCount = 0;
+
+            mockBufferManager.Setup(m => m.NextExpectedPartNumber)
+                .Returns(() => streamingCount + 1);
+            mockBufferManager.Setup(m => m.AddBuffer(It.IsAny<IPartDataSource>()))
+                .Callback<IPartDataSource>((ds) =>
+                {
+                    if (ds is StreamingDataSource)
+                        streamingCount++;
+                });
 
-            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
             var handler = new BufferedPartDataHandler(mockBufferManager.Object, config);
 
-            var response = new GetObjectResponse
+            try
             {
-                ContentLength = partSize,
-                ResponseStream = new MemoryStream(partData)
-            };
-
-            // Act
-            await handler.ProcessPartAsync(1, response, CancellationToken.None);
+                // Act - Process parts 1, 2, 3 in order
+                await handler.ProcessPartAsync(1, CreateMockGetObjectResponse(512), CancellationToken.None);
+                await handler.ProcessPartAsync(2, CreateMockGetObjectResponse(512), CancellationToken.None);
+                await handler.ProcessPartAsync(3, CreateMockGetObjectResponse(512), CancellationToken.None);
 
-            // Assert
-            Assert.IsNotNull(capturedBuffer);
-            Assert.AreEqual(partSize, capturedBuffer.Length);
+                // Assert - All should be streaming
+                Assert.AreEqual(3, streamingCount);
+            }
+            finally
+            {
+                handler.Dispose();
+            }
         }
 
         #endregion
 
-        #region ProcessPartAsync Tests - Data Integrity
+        #region ProcessPartAsync Tests - Out-of-Order (Buffering Path)
 
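The buffering path verified below copies the response stream into pooled memory before handing it to the buffer manager. The essential rent/fill/return lifecycle of that technique, reduced to a sketch (this illustrates the general ArrayPool pattern, not the SDK's StreamPartBuffer implementation), looks like:

using System.Buffers;
using System.IO;
using System.Threading;
using System.Threading.Tasks;

static class PooledBufferExample
{
    static async Task<byte[]> RentAndFillAsync(
        Stream source, int expectedBytes, CancellationToken cancellationToken)
    {
        // Rent may return a larger array than requested; track the logical length separately.
        byte[] rented = ArrayPool<byte>.Shared.Rent(expectedBytes);
        try
        {
            int total = 0;
            while (total < expectedBytes)
            {
                int read = await source.ReadAsync(rented, total, expectedBytes - total, cancellationToken)
                    .ConfigureAwait(false);
                if (read == 0)
                    throw new IOException($"Stream ended after {total} of {expectedBytes} bytes.");
                total += read;
            }
            // Caller must eventually call ArrayPool<byte>.Shared.Return(rented).
            return rented;
        }
        catch
        {
            // Return the buffer on failure so the pool does not leak.
            ArrayPool<byte>.Shared.Return(rented);
            throw;
        }
    }
}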
         [TestMethod]
-        public async Task ProcessPartAsync_PreservesDataIntegrity()
+        public async Task ProcessPartAsync_OutOfOrderPart_BuffersToMemory()
         {
             // Arrange
-            var partSize = 1024 * 1024; // 1MB
-            var partData = new byte[partSize];
-            new Random(42).NextBytes(partData); // Seeded for reproducibility
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var mockBufferManager = new Mock<IPartBufferManager>();
+            mockBufferManager.Setup(m => m.NextExpectedPartNumber).Returns(1);
 
             StreamPartBuffer capturedBuffer = null;
-            var mockBufferManager = new Mock<IPartBufferManager>();
-            mockBufferManager.Setup(x => x.AddBuffer(It.IsAny<StreamPartBuffer>()))
+            mockBufferManager.Setup(m => m.AddBuffer(It.IsAny<StreamPartBuffer>()))
                 .Callback<StreamPartBuffer>((buffer) => capturedBuffer = buffer);
 
-            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
             var handler = new BufferedPartDataHandler(mockBufferManager.Object, config);
 
-            var response = new GetObjectResponse
+            try
             {
-                ContentLength = partSize,
-                ResponseStream = new MemoryStream(partData)
-            };
+                var testData = MultipartDownloadTestHelpers.GenerateTestData(512, 0);
+                var response = CreateMockGetObjectResponse(512, testData);
 
-            // Act
-            await handler.ProcessPartAsync(1, response, CancellationToken.None);
+                // Act - Process part 2 when expecting part 1 (out of order)
+                await handler.ProcessPartAsync(2, response, CancellationToken.None);
 
-            // Assert - verify data matches exactly
-            Assert.IsNotNull(capturedBuffer);
-            var bufferedData = new byte[capturedBuffer.Length];
-            Buffer.BlockCopy(capturedBuffer.ArrayPoolBuffer, 0, bufferedData, 0, capturedBuffer.Length);
+                // Assert
+                Assert.IsNotNull(capturedBuffer);
+                Assert.AreEqual(2, capturedBuffer.PartNumber);
+                Assert.AreEqual(512, capturedBuffer.Length);
 
-            CollectionAssert.AreEqual(partData, bufferedData);
+                // Verify data was buffered correctly
+                byte[] bufferData = new byte[512];
+                Buffer.BlockCopy(capturedBuffer.ArrayPoolBuffer, 0, bufferData, 0, 512);
+                Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch(testData, bufferData, 0, 512));
+            }
+            finally
+            {
+                handler.Dispose();
+            }
         }
 
         [TestMethod]
-        public async Task ProcessPartAsync_HandlesZeroByteResponse()
+        public async Task ProcessPartAsync_OutOfOrderPart_DisposesResponse()
         {
             // Arrange
-            var mockBufferManager = new Mock<IPartBufferManager>();
             var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var mockBufferManager = new Mock<IPartBufferManager>();
+            mockBufferManager.Setup(m => m.NextExpectedPartNumber).Returns(1);
+            mockBufferManager.Setup(m => m.AddBuffer(It.IsAny<StreamPartBuffer>()));
+
             var handler = new BufferedPartDataHandler(mockBufferManager.Object, config);
 
-            var response = new GetObjectResponse
+            try
             {
-                ContentLength = 0,
-                ResponseStream = new MemoryStream(Array.Empty<byte>())
-            };
+                var response = CreateMockGetObjectResponse(512);
 
-            // Act
-            await handler.ProcessPartAsync(1, response, CancellationToken.None);
+                // Act - Process out of order part
+                await handler.ProcessPartAsync(3, response, CancellationToken.None);
 
-            // Assert - should handle empty response gracefully
-            mockBufferManager.Verify(
-                x => x.AddBuffer(It.IsAny<StreamPartBuffer>()),
-                Times.Once);
+                // Assert - Response should be disposed after buffering
+                // After disposal, stream is either null or no longer readable
+                Assert.IsTrue(response.ResponseStream == null || !response.ResponseStream.CanRead);
+            }
+            finally
+            {
+                handler.Dispose();
+            }
         }
 
         [TestMethod]
-        public async Task ProcessPartAsync_WithUnexpectedEOF_ThrowsIOException()
+        public async Task ProcessPartAsync_OutOfOrderPart_DoesNotReleaseCapacityImmediately()
         {
             // Arrange
-            var expectedBytes = 1024 * 1024; // 1MB expected
-            var actualBytes = 512 * 1024;    // 512KB available (premature EOF)
-            var partData = new byte[actualBytes];
-            new Random().NextBytes(partData);
-
-            var mockBufferManager = new Mock<IPartBufferManager>();
             var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var mockBufferManager = new Mock<IPartBufferManager>();
+            mockBufferManager.Setup(m => m.NextExpectedPartNumber).Returns(1);
+            mockBufferManager.Setup(m => m.AddBuffer(It.IsAny<StreamPartBuffer>()));
+
             var handler = new BufferedPartDataHandler(mockBufferManager.Object, config);
 
-            // Create a response that promises more bytes than it delivers
-            var response = new GetObjectResponse
+            try
             {
-                ContentLength = expectedBytes, // Promise 1MB
-                ResponseStream = new MemoryStream(partData), // Only deliver 512KB
-                ResponseMetadata = new Amazon.Runtime.ResponseMetadata()
-            };
+                var response = CreateMockGetObjectResponse(512);
+
+                // Act
+                await handler.ProcessPartAsync(2, response, CancellationToken.None);
 
-            // Act & Assert
-            var exception = await Assert.ThrowsExceptionAsync<StreamSizeMismatchException>(
-                async () => await handler.ProcessPartAsync(1, response, CancellationToken.None));
+                // Assert - AddBuffer should be called with StreamPartBuffer (not IPartDataSource)
+                mockBufferManager.Verify(m => m.AddBuffer(
+                    It.IsAny<StreamPartBuffer>()), Times.Once);
 
-            // Verify exception message contains key information
-            StringAssert.Contains(exception.Message, expectedBytes.ToString());
-            StringAssert.Contains(exception.Message, actualBytes.ToString());
+                // Note: Capacity will be released later when the buffer is consumed by the reader
+            }
+            finally
+            {
+                handler.Dispose();
+            }
         }
 
+        #endregion
+
+        #region ProcessPartAsync Tests - Mixed Scenarios
+
         [TestMethod]
-        public async Task ProcessPartAsync_WithUnexpectedEOF_DoesNotBufferPartialData()
+        public async Task ProcessPartAsync_MixedInOrderAndOutOfOrder_HandlesCorrectly()
         {
             // Arrange
-            var expectedBytes = 1024 * 1024; // 1MB expected
-            var actualBytes = 512 * 1024;    // 512KB available (premature EOF)
-            var partData = new byte[actualBytes];
-            new Random().NextBytes(partData);
-
-            var mockBufferManager = new Mock<IPartBufferManager>();
             var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
-            var handler = new BufferedPartDataHandler(mockBufferManager.Object, config);
+            var mockBufferManager = new Mock<IPartBufferManager>();
+
+            var currentExpectedPart = 1;
+            mockBufferManager.Setup(m => m.NextExpectedPartNumber)
+                .Returns(() => currentExpectedPart);
+
+            var streamingParts = 0;
+            var bufferedParts = 0;
+
+            mockBufferManager.Setup(m => m.AddBuffer(It.IsAny<IPartDataSource>()))
+                .Callback<IPartDataSource>((ds) =>
+                {
+                    if (ds is StreamingDataSource)
+                    {
+                        streamingParts++;
+                        currentExpectedPart++;
+                    }
+                });
+
+            mockBufferManager.Setup(m => m.AddBuffer(It.IsAny<StreamPartBuffer>()))
+                .Callback<StreamPartBuffer>((buffer) => bufferedParts++);
 
-            var response = new GetObjectResponse
-            {
-                ContentLength = expectedBytes,
-                ResponseStream = new MemoryStream(partData),
-                ResponseMetadata = new Amazon.Runtime.ResponseMetadata()
-            };
+            var handler = new BufferedPartDataHandler(mockBufferManager.Object, config);
 
-            // Act
             try
             {
-                await handler.ProcessPartAsync(1, response, CancellationToken.None);
-                Assert.Fail("Expected StreamSizeMismatchException was not thrown");
+                // Act - Mixed order: 1 (in), 3 (out), 2 (in after advance)
+                await handler.ProcessPartAsync(1, CreateMockGetObjectResponse(512), CancellationToken.None);
+                await handler.ProcessPartAsync(3, CreateMockGetObjectResponse(512), CancellationToken.None);
+                await handler.ProcessPartAsync(2, CreateMockGetObjectResponse(512), CancellationToken.None);
+
+                // Assert
+                Assert.AreEqual(2, streamingParts); // Parts 1 and 2 streamed
+                Assert.AreEqual(1, bufferedParts);  // Part 3 buffered
             }
-            catch (Amazon.S3.Model.StreamSizeMismatchException)
+            finally
             {
-                // Expected
+                handler.Dispose();
             }
-
-            // Assert - should NOT have added any buffer to manager since download failed
-            mockBufferManager.Verify(
-                x => x.AddBuffer(It.IsAny<StreamPartBuffer>()),
-                Times.Never);
         }
 
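Several of the paths exercised in this region rely on the ownership-flag discipline the diff introduces (`ownsResponse` in the handler and download manager): dispose a resource only if ownership has not yet been handed off. Stripped to its core, the pattern reads roughly as follows (a generic sketch of the technique, not the SDK's code):

using System;
using System.Threading.Tasks;

static class OwnershipHandOffExample
{
    static async Task HandOffAsync(IDisposable resource, Func<IDisposable, Task> transferOwnership)
    {
        var ownsResource = true;
        try
        {
            // On success the callee has taken ownership and becomes
            // responsible for eventual disposal.
            await transferOwnership(resource).ConfigureAwait(false);
            ownsResource = false;
        }
        catch
        {
            // The hand-off never completed, so we still own the resource
            // and must clean it up before rethrowing.
            if (ownsResource)
                resource.Dispose();
            throw;
        }
    }
}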
         [TestMethod]
-        [ExpectedException(typeof(TaskCanceledException))]
-        public async Task ProcessPartAsync_WithCancelledToken_ThrowsTaskCanceledException()
+        public async Task ProcessPartAsync_InOrderFollowedByOutOfOrder_HandlesCorrectly()
         {
             // Arrange
-            var partSize = 8 * 1024 * 1024;
-            var partData = new byte[partSize];
-
-            var mockBufferManager = new Mock<IPartBufferManager>();
             var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var mockBufferManager = new Mock<IPartBufferManager>();
+
+            mockBufferManager.SetupSequence(m => m.NextExpectedPartNumber)
+                .Returns(1)
+                .Returns(2);
+
+            mockBufferManager.Setup(m => m.AddBuffer(It.IsAny<IPartDataSource>()));
+            mockBufferManager.Setup(m => m.AddBuffer(It.IsAny<StreamPartBuffer>()));
+
             var handler = new BufferedPartDataHandler(mockBufferManager.Object, config);
 
-            var response = new GetObjectResponse
+            try
             {
-                ContentLength = partSize,
-                ResponseStream = new MemoryStream(partData)
-            };
+                // Act
+                await handler.ProcessPartAsync(1, CreateMockGetObjectResponse(512), CancellationToken.None);
+                await handler.ProcessPartAsync(3, CreateMockGetObjectResponse(512), CancellationToken.None);
+
+                // Assert
+                mockBufferManager.Verify(m => m.AddBuffer(
+                    It.Is<IPartDataSource>(ds => ds is StreamingDataSource && ds.PartNumber == 1)), Times.Once);
+
+                mockBufferManager.Verify(m => m.AddBuffer(
+                    It.Is<StreamPartBuffer>(b => b.PartNumber == 3)), Times.Once);
+            }
+            finally
+            {
+                handler.Dispose();
+            }
+        }
+
+        [TestMethod]
+        public async Task ProcessPartAsync_OutOfOrderFollowedByInOrder_HandlesCorrectly()
+        {
+            // Arrange
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var mockBufferManager = new Mock<IPartBufferManager>();
+
+            // NextExpectedPartNumber is called multiple times per part, so provide enough values
+            // Part 2 (out of order): calls it twice, should return 1 both times
+            // Part 1 (in order): calls it twice, should return 1 both times
+            mockBufferManager.Setup(m => m.NextExpectedPartNumber).Returns(1);
+
+            mockBufferManager.Setup(m => m.AddBuffer(It.IsAny<IPartDataSource>()));
+            mockBufferManager.Setup(m => m.AddBuffer(It.IsAny<StreamPartBuffer>()));
 
-            var cts = new CancellationTokenSource();
-            cts.Cancel();
+            var handler = new BufferedPartDataHandler(mockBufferManager.Object, config);
 
-            // Act
-            await handler.ProcessPartAsync(1, response, cts.Token);
+            try
+            {
+                // Act
+                await handler.ProcessPartAsync(2, CreateMockGetObjectResponse(512), CancellationToken.None);
+                await handler.ProcessPartAsync(1, CreateMockGetObjectResponse(512), CancellationToken.None);
+
+                // Assert
+                mockBufferManager.Verify(m => m.AddBuffer(
+                    It.Is<StreamPartBuffer>(b => b.PartNumber == 2)), Times.Once);
+
+                mockBufferManager.Verify(m => m.AddBuffer(
+                    It.Is<IPartDataSource>(ds => ds is StreamingDataSource && ds.PartNumber == 1)), Times.Once);
+            }
+            finally
+            {
+                handler.Dispose();
+            }
         }
 
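To make the dispatch these assertions encode concrete, here is a worked trace of the rule `partNumber == NextExpectedPartNumber ? stream : buffer` for the arrival order 1, 3, 2 (assuming, as the mixed-order test above does, that the expected part number advances when an in-order part is added):

// Arrival   NextExpected   Decision                 NextExpected afterwards
// part 1         1         stream directly          2
// part 3         2         buffer into ArrayPool    2 (unchanged)
// part 2         2         stream directly          3 (buffered part 3 is consumed next)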
 
         [TestMethod]
-        public async Task ProcessPartAsync_CallsAddBufferOnce()
+        public async Task ProcessPartAsync_InOrderVsOutOfOrder_VerifyStreamingVsBufferingBehavior()
         {
             // Arrange
-            var partSize = 1024;
-            var partData = new byte[partSize];
-
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
             var mockBufferManager = new Mock<IPartBufferManager>();
-            mockBufferManager.Setup(x => x.AddBuffer(It.IsAny<StreamPartBuffer>()));
+
+            // Track what types are added to verify memory allocation patterns
+            var streamingPartNumbers = new List<int>(); // Parts that stream (no ArrayPool allocation)
+            var bufferedPartNumbers = new List<int>(); // Parts that buffer (use ArrayPool)
+
+            mockBufferManager.Setup(m => m.NextExpectedPartNumber).Returns(1);
+
+            // Capture StreamingDataSource additions (streaming path - NO ArrayPool allocation)
+            mockBufferManager.Setup(m => m.AddBuffer(
+                It.IsAny<StreamingDataSource>()))
+                .Callback<StreamingDataSource>(ds =>
+                {
+                    streamingPartNumbers.Add(ds.PartNumber);
+                });
+
+            // Capture StreamPartBuffer additions (buffering path - USES ArrayPool)
+            mockBufferManager.Setup(m => m.AddBuffer(
+                It.IsAny<StreamPartBuffer>()))
+                .Callback<StreamPartBuffer>(buffer =>
+                {
+                    bufferedPartNumbers.Add(buffer.PartNumber);
+                });
 
-            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
             var handler = new BufferedPartDataHandler(mockBufferManager.Object, config);
 
-            var response = new GetObjectResponse
+            try
             {
-                ContentLength = partSize,
-                ResponseStream = new MemoryStream(partData)
-            };
+                // Act - Process part 1 (in order - should stream, no ArrayPool buffer)
+                await handler.ProcessPartAsync(1, CreateMockGetObjectResponse(512), CancellationToken.None);
+
+                // Process part 3 (out of order - should buffer via ArrayPool)
+                await handler.ProcessPartAsync(3, CreateMockGetObjectResponse(512), CancellationToken.None);
+
+                // Assert
+                // Part 1 should use streaming path (no ArrayPool allocation)
+                Assert.AreEqual(1, streamingPartNumbers.Count, "Expected exactly 1 part to stream");
+                Assert.AreEqual(1, streamingPartNumbers[0], "Part 1 should stream directly");
+
+                // Part 3 should use buffering path (ArrayPool allocation)
+                Assert.AreEqual(1, bufferedPartNumbers.Count, "Expected exactly 1 part to be buffered");
+                Assert.AreEqual(3, bufferedPartNumbers[0], "Part 3 should be buffered");
+
+                // Verify ReleaseBufferSpace was called for streaming path (immediate capacity release)
+                mockBufferManager.Verify(m => m.ReleaseBufferSpace(), Times.Once,
+                    "Streaming path should release capacity immediately");
+            }
+            finally
+            {
+                handler.Dispose();
+            }
+        }
 
-            var cts = new CancellationTokenSource();
+        [TestMethod]
+        public async Task ProcessPartAsync_AllInOrderParts_NoBufferingAllStreaming()
+        {
+            // Arrange
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var mockBufferManager = new Mock<IPartBufferManager>();
+
+            var streamingPartNumbers = new List<int>();
+            var bufferedPartNumbers = new List<int>();
+            var currentExpectedPart = 1;
+
+            mockBufferManager.Setup(m => m.NextExpectedPartNumber)
+                .Returns(() => currentExpectedPart);
+
+            // Capture streaming additions
+            mockBufferManager.Setup(m => m.AddBuffer(
+                It.IsAny<StreamingDataSource>()))
+                .Callback<StreamingDataSource>(ds =>
+                {
+                    streamingPartNumbers.Add(ds.PartNumber);
+                    currentExpectedPart++; // Advance expected part after streaming
+                });
+
+            // Capture buffering additions
+            mockBufferManager.Setup(m => m.AddBuffer(
+                It.IsAny<StreamPartBuffer>()))
+                .Callback<StreamPartBuffer>(buffer =>
+                {
+                    bufferedPartNumbers.Add(buffer.PartNumber);
+                });
 
-            // Act
-            await handler.ProcessPartAsync(1, response, cts.Token);
+            var handler = new BufferedPartDataHandler(mockBufferManager.Object, config);
 
-            // Assert - verify AddBuffer was called exactly once
-            mockBufferManager.Verify(x => x.AddBuffer(It.IsAny<StreamPartBuffer>()), Times.Once);
+            try
+            {
+                // Act - Process 5 parts in perfect order
+                for (int i = 1; i <= 5; i++)
+                {
+                    await handler.ProcessPartAsync(i, CreateMockGetObjectResponse(512), CancellationToken.None);
+                }
+
+                // Assert - Best case scenario: all parts stream, zero buffering
+                Assert.AreEqual(5, streamingPartNumbers.Count, "All 5 parts should stream");
+                Assert.AreEqual(0, bufferedPartNumbers.Count, "No parts should be buffered when all arrive in order");
+
+                // Verify parts streamed in correct order
+                for (int i = 0; i < 5; i++)
+                {
+                    Assert.AreEqual(i + 1, streamingPartNumbers[i],
+                        $"Part {i + 1} should have streamed in order");
+                }
+
+                // Verify capacity was released 5 times (once per streaming part)
+                mockBufferManager.Verify(m => m.ReleaseBufferSpace(), Times.Exactly(5),
+                    "Capacity should be released immediately for each streaming part");
+            }
+            finally
+            {
+                handler.Dispose();
+            }
         }
 
         #endregion
 
-        #region WaitForCapacityAsync Tests
+        #region ProcessPartAsync Tests - Error Handling
 
         [TestMethod]
-        public async Task WaitForCapacityAsync_DelegatesToBufferManager()
+        public async Task ProcessPartAsync_StreamingPathError_ReleasesCapacity()
         {
             // Arrange
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
             var mockBufferManager = new Mock<IPartBufferManager>();
-            mockBufferManager.Setup(x => x.WaitForBufferSpaceAsync(It.IsAny<CancellationToken>()))
-                .Returns(Task.CompletedTask);
+            mockBufferManager.Setup(m => m.NextExpectedPartNumber).Returns(1);
+            mockBufferManager.Setup(m => m.AddBuffer(It.IsAny<StreamingDataSource>()))
+                .Throws(new InvalidOperationException("Test error"));
 
-            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
             var handler = new BufferedPartDataHandler(mockBufferManager.Object, config);
 
-            // Act
-            await handler.WaitForCapacityAsync(CancellationToken.None);
+            try
+            {
+                var response = CreateMockGetObjectResponse(512);
 
-            // Assert
-            mockBufferManager.Verify(
-                x => x.WaitForBufferSpaceAsync(It.IsAny<CancellationToken>()),
-                Times.Once);
+                // Act & Assert
+                await Assert.ThrowsExceptionAsync<InvalidOperationException>(async () =>
+                {
+                    await handler.ProcessPartAsync(1, response, CancellationToken.None);
+                });
+
+                // Note: Handler's ReleaseCapacity is called on error,
+                // which eventually calls the manager's ReleaseBufferSpace
+            }
+            finally
+            {
+                handler.Dispose();
+            }
         }
 
         [TestMethod]
-        public async Task WaitForCapacityAsync_PassesCancellationToken()
+        public async Task ProcessPartAsync_BufferingPathError_ReleasesCapacity()
         {
             // Arrange
-            CancellationToken capturedToken = default;
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
             var mockBufferManager = new Mock<IPartBufferManager>();
-            mockBufferManager.Setup(x => x.WaitForBufferSpaceAsync(It.IsAny<CancellationToken>()))
-                .Callback<CancellationToken>(ct => capturedToken = ct)
-                .Returns(Task.CompletedTask);
+            mockBufferManager.Setup(m => m.NextExpectedPartNumber).Returns(1);
+            mockBufferManager.Setup(m => m.AddBuffer(It.IsAny<StreamPartBuffer>()))
+                .Throws(new InvalidOperationException("Test error"));
 
-            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
             var handler = new BufferedPartDataHandler(mockBufferManager.Object, config);
 
-            var cts = new CancellationTokenSource();
+            try
+            {
+                var response = CreateMockGetObjectResponse(512);
 
-            // Act
-            await handler.WaitForCapacityAsync(cts.Token);
+                // Act & Assert
+                await Assert.ThrowsExceptionAsync<InvalidOperationException>(async () =>
+                {
+                    await handler.ProcessPartAsync(2, response, CancellationToken.None);
+                });
 
-            // Assert
-            Assert.AreEqual(cts.Token, capturedToken);
+                // Capacity should be released on error
+            }
+            finally
+            {
+                handler.Dispose();
+            }
         }
 
         [TestMethod]
-        [ExpectedException(typeof(OperationCanceledException))]
-        public async Task WaitForCapacityAsync_WhenCancelled_ThrowsOperationCanceledException()
+        public async Task ProcessPartAsync_BufferingReadError_DisposesResponseAndReleasesCapacity()
         {
             // Arrange
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
             var mockBufferManager = new Mock<IPartBufferManager>();
-            mockBufferManager.Setup(x => x.WaitForBufferSpaceAsync(It.IsAny<CancellationToken>()))
-                .ThrowsAsync(new OperationCanceledException());
+            mockBufferManager.Setup(m => m.NextExpectedPartNumber).Returns(1);
 
-            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
             var handler = new BufferedPartDataHandler(mockBufferManager.Object, config);
 
-            var cts = new CancellationTokenSource();
-            cts.Cancel();
-
-            // Act
-            await handler.WaitForCapacityAsync(cts.Token);
+            try
+            {
+                // Create response with faulty stream
+                var faultyStream = new FaultyStream(new IOException("Stream read error"));
+                var response = new GetObjectResponse
+                {
+                    ContentLength = 512,
+                    ResponseStream = faultyStream
+                };
+
+                // Act & Assert
+                await Assert.ThrowsExceptionAsync<IOException>(async () =>
+                {
+                    await handler.ProcessPartAsync(2, response, CancellationToken.None);
+                });
+            }
+            finally
+            {
+                handler.Dispose();
+            }
         }
 
         #endregion
 
-        #region ReleaseCapacity Tests
+        #region WaitForCapacityAsync Tests
 
         [TestMethod]
-        public void ReleaseCapacity_DelegatesToBufferManager()
+        public async Task WaitForCapacityAsync_DelegatesToBufferManager()
         {
             // Arrange
-            var mockBufferManager = new Mock<IPartBufferManager>();
             var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var mockBufferManager = new Mock<IPartBufferManager>();
+            mockBufferManager.Setup(m => m.WaitForBufferSpaceAsync(It.IsAny<CancellationToken>()))
+                .Returns(Task.CompletedTask);
+
             var handler = new BufferedPartDataHandler(mockBufferManager.Object, config);
 
-            // Act
-            handler.ReleaseCapacity();
+            try
+            {
+                // Act
+                await handler.WaitForCapacityAsync(CancellationToken.None);
 
-            // Assert
-            mockBufferManager.Verify(x => x.ReleaseBufferSpace(), Times.Once);
+                // Assert
+                mockBufferManager.Verify(m => m.WaitForBufferSpaceAsync(
+                    It.IsAny<CancellationToken>()), Times.Once);
+            }
+            finally
+            {
+                handler.Dispose();
+            }
         }
 
+        #endregion
+
+        #region ReleaseCapacity Tests
+
         [TestMethod]
-        public void ReleaseCapacity_CanBeCalledMultipleTimes()
+        public void ReleaseCapacity_DelegatesToBufferManager()
         {
             // Arrange
-            var mockBufferManager = new Mock<IPartBufferManager>();
             var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var mockBufferManager = new Mock<IPartBufferManager>();
+            mockBufferManager.Setup(m => m.ReleaseBufferSpace());
+
             var handler = new BufferedPartDataHandler(mockBufferManager.Object, config);
 
-            // Act
-            handler.ReleaseCapacity();
-            handler.ReleaseCapacity();
-            handler.ReleaseCapacity();
+            try
+            {
+                // Act
+                handler.ReleaseCapacity();
 
-            // Assert
-            mockBufferManager.Verify(x => x.ReleaseBufferSpace(), Times.Exactly(3));
+                // Assert
+                mockBufferManager.Verify(m => m.ReleaseBufferSpace(), Times.Once);
+            }
+            finally
+            {
+                handler.Dispose();
+            }
         }
 
         #endregion
@@ -464,113 +696,144 @@ public void ReleaseCapacity_CanBeCalledMultipleTimes()
         #region OnDownloadComplete Tests
 
         [TestMethod]
-        public void OnDownloadComplete_WithNullException_DelegatesToBufferManager()
+        public void OnDownloadComplete_DelegatesToBufferManager()
         {
             // Arrange
-            var mockBufferManager = new Mock<IPartBufferManager>();
             var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var mockBufferManager = new Mock<IPartBufferManager>();
+            mockBufferManager.Setup(m => m.MarkDownloadComplete(It.IsAny<Exception>()));
+
             var handler = new BufferedPartDataHandler(mockBufferManager.Object, config);
 
-            // Act
-            handler.OnDownloadComplete(null);
+            try
+            {
+                // Act
+                handler.OnDownloadComplete(null);
 
-            // Assert
-            mockBufferManager.Verify(
-                x => x.MarkDownloadComplete(null),
-                Times.Once);
+                // Assert
+                mockBufferManager.Verify(m => m.MarkDownloadComplete(null), Times.Once);
+            }
+            finally
+            {
+                handler.Dispose();
+            }
         }
 
         [TestMethod]
         public void OnDownloadComplete_WithException_PassesExceptionToBufferManager()
         {
             // Arrange
-            var testException = new InvalidOperationException("Test error");
-            Exception capturedEx = null;
-
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
             var mockBufferManager = new Mock<IPartBufferManager>();
-            mockBufferManager.Setup(x => x.MarkDownloadComplete(It.IsAny<Exception>()))
-                .Callback<Exception>(ex => capturedEx = ex);
+            var testException = new Exception("Download failed");
+            mockBufferManager.Setup(m => m.MarkDownloadComplete(It.IsAny<Exception>()));
 
-            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
             var handler = new BufferedPartDataHandler(mockBufferManager.Object, config);
 
-            // Act
-            handler.OnDownloadComplete(testException);
+            try
+            {
+                // Act
+                handler.OnDownloadComplete(testException);
 
-            // Assert
-            Assert.AreEqual(testException, capturedEx);
+                // Assert
+                mockBufferManager.Verify(m => m.MarkDownloadComplete(testException), Times.Once);
+            }
+            finally
+            {
+                handler.Dispose();
+            }
         }
 
-        [TestMethod]
-        public void OnDownloadComplete_WithCancelledException_PassesToBufferManager()
-        {
-            // Arrange
-            var testException = new OperationCanceledException();
-
-            var mockBufferManager = new Mock<IPartBufferManager>();
-            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
-            var handler = new BufferedPartDataHandler(mockBufferManager.Object, config);
-
-            // Act
-            handler.OnDownloadComplete(testException);
+        #endregion
 
-            // Assert
-            mockBufferManager.Verify(
-                x => x.MarkDownloadComplete(It.Is<Exception>(e => e == testException)),
-                Times.Once);
-        }
+        #region Disposal Tests
 
         [TestMethod]
-        public void OnDownloadComplete_CanBeCalledMultipleTimes()
+        public void Dispose_MultipleCalls_IsIdempotent()
         {
             // Arrange
-            var mockBufferManager = new Mock<IPartBufferManager>();
             var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var mockBufferManager = new Mock<IPartBufferManager>();
             var handler = new BufferedPartDataHandler(mockBufferManager.Object, config);
 
-            // Act - calling multiple times should work
-            handler.OnDownloadComplete(null);
-            handler.OnDownloadComplete(new Exception("test"));
-            handler.OnDownloadComplete(null);
+            // Act - Dispose multiple times
+            handler.Dispose();
+            handler.Dispose();
+            handler.Dispose();
 
-            // Assert
-            mockBufferManager.Verify(
-                x => x.MarkDownloadComplete(It.IsAny<Exception>()),
-                Times.Exactly(3));
+            // Assert - Should not throw
         }
 
         #endregion
 
-        #region Dispose Tests
+        #region Helper Methods
 
-        [TestMethod]
-        public void Dispose_DoesNotDisposeBufferManager()
+        /// <summary>
+        /// Creates a mock GetObjectResponse with test data.
+        /// </summary>
+        private GetObjectResponse CreateMockGetObjectResponse(long contentLength, byte[] testData = null)
         {
-            // Arrange
-            var mockBufferManager = new Mock<IPartBufferManager>();
-            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
-            var handler = new BufferedPartDataHandler(mockBufferManager.Object, config);
-
-            // Act
-            handler.Dispose();
+            if (testData == null)
+            {
+                testData = MultipartDownloadTestHelpers.GenerateTestData((int)contentLength, 0);
+            }
 
-            // Assert - BufferManager is owned by caller, should not be disposed
-            mockBufferManager.Verify(x => x.Dispose(), Times.Never);
+            return new GetObjectResponse
+            {
+                ContentLength = contentLength,
+                ResponseStream = new MemoryStream(testData),
+                ETag = "test-etag"
+            };
         }
 
-        [TestMethod]
-        public void Dispose_CanBeCalledMultipleTimes()
+        /// <summary>
+        /// Stream that throws exceptions for testing error handling.
+        /// </summary>
+        private class FaultyStream : Stream
         {
-            // Arrange
-            var mockBufferManager = new Mock<IPartBufferManager>();
-            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
-            var handler = new BufferedPartDataHandler(mockBufferManager.Object, config);
+            private readonly Exception _exception;
 
-            // Act
-            handler.Dispose();
-            handler.Dispose(); // Should not throw
+            public FaultyStream(Exception exception)
+            {
+                _exception = exception;
+            }
 
-            // Assert - no exception
+            public override bool CanRead => true;
+            public override bool CanSeek => false;
+            public override bool CanWrite => false;
+            public override long Length => throw new NotSupportedException();
+            public override long Position
+            {
+                get => throw new NotSupportedException();
+                set => throw new NotSupportedException();
+            }
+
+            public override void Flush() { }
+
+            public override int Read(byte[] buffer, int offset, int count)
+            {
+                throw _exception;
+            }
+
+            public override Task<int> ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken)
+            {
+                throw _exception;
+            }
+
+            public override long Seek(long offset, SeekOrigin origin)
+            {
+                throw new NotSupportedException();
+            }
+
+            public override void SetLength(long value)
+            {
+                throw new NotSupportedException();
+            }
+
+            public override void Write(byte[] buffer, int offset, int count)
+            {
+                throw new NotSupportedException();
+            }
         }
 
         #endregion
diff --git a/sdk/test/Services/S3/UnitTests/Custom/PartBufferManagerTests.cs b/sdk/test/Services/S3/UnitTests/Custom/PartBufferManagerTests.cs
index b07ddce455ac..394490f1fbd2 100644
--- a/sdk/test/Services/S3/UnitTests/Custom/PartBufferManagerTests.cs
+++ b/sdk/test/Services/S3/UnitTests/Custom/PartBufferManagerTests.cs
@@ -1,10 +1,14 @@
 using Amazon.S3.Transfer.Internal;
+using Amazon.S3.Model;
 using Microsoft.VisualStudio.TestTools.UnitTesting;
 using Moq;
 using System;
+using System.IO;
+using System.Collections.Generic;
 using System.Buffers;
 using System.Threading;
 using System.Threading.Tasks;
+using System.Linq;
 
 namespace AWSSDK.UnitTests
 {
@@ -265,7 +269,7 @@ public void AddBuffer_WithNullBuffer_ThrowsArgumentNullException()
             try
             {
                 // Act
-                manager.AddBuffer(null);
+                manager.AddBuffer((IPartDataSource)null);
 
                 // Assert - ExpectedException
             }
@@ -934,6 +938,295 @@ public async Task MarkDownloadComplete_SignalsWaitingReads()
 
         #endregion
 
+        #region AddBufferAsync(IPartDataSource) Tests
+
+        [TestMethod]
+        public async Task AddBufferAsync_IPartDataSource_WithStreamingDataSource_AddsSuccessfully()
+        {
+            // Arrange
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var manager = new PartBufferManager(config);
+
+            try
+            {
+                // Create a StreamingDataSource
+                var testData = MultipartDownloadTestHelpers.GenerateTestData(512, 0);
+                var response = new GetObjectResponse
+                {
+                    ContentLength = 512,
+                    ResponseStream = new MemoryStream(testData)
+                };
+                var streamingSource = new StreamingDataSource(1, response);
+
+                // Act
+                manager.AddBuffer(streamingSource);
+
+                // Assert - Should be able to read from part 1
+                byte[] readBuffer = new byte[512];
+                int bytesRead = await manager.ReadAsync(readBuffer, 0, 512, CancellationToken.None);
+                Assert.AreEqual(512, bytesRead);
+                Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch(testData, readBuffer, 0, 512));
+            }
+            finally
+            {
+                manager.Dispose();
+            }
+        }
+
+        [TestMethod]
+        public async Task AddBufferAsync_IPartDataSource_WithBufferedDataSource_AddsSuccessfully()
+        {
+            // Arrange
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var manager = new PartBufferManager(config);
+
+            try
+            {
+                // Create a BufferedDataSource
+                byte[] testData = MultipartDownloadTestHelpers.GenerateTestData(512, 0);
+                byte[] testBuffer = ArrayPool<byte>.Shared.Rent(512);
+                Buffer.BlockCopy(testData, 0, testBuffer, 0, 512);
+                var partBuffer = new StreamPartBuffer(1, testBuffer, 512);
+                var bufferedSource = new BufferedDataSource(partBuffer);
+
+                // Act
+                manager.AddBuffer(bufferedSource);
+
+                // Assert - Should be able to read from part 1
+                byte[] readBuffer = new byte[512];
+                int bytesRead = await manager.ReadAsync(readBuffer, 0, 512, CancellationToken.None);
+                Assert.AreEqual(512, bytesRead);
+                Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch(testData, readBuffer, 0, 512));
+            }
+            finally
+            {
+                manager.Dispose();
+            }
+        }
+
+        [TestMethod]
+        [ExpectedException(typeof(ArgumentNullException))]
+        public async Task AddBufferAsync_IPartDataSource_WithNull_ThrowsArgumentNullException()
+        {
+            // Arrange
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var manager = new PartBufferManager(config);
+
+            try
+            {
+                // Act
+                manager.AddBuffer((IPartDataSource)null);
+
+                // Assert - ExpectedException
+            }
+            finally
+            {
+                manager.Dispose();
+            }
+        }
+
+        [TestMethod]
+        public async Task AddBufferAsync_IPartDataSource_SignalsPartAvailable()
+        {
+            // Arrange
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var manager = new PartBufferManager(config);
+
+            try
+            {
+                // Start reading before part is available
+                var readTask = Task.Run(async () =>
+                {
+                    byte[] readBuffer = new byte[512];
+                    return await manager.ReadAsync(readBuffer, 0, 512, CancellationToken.None);
+                });
+
+                // Give read task time to start waiting
+                await Task.Delay(50);
+
+                // Create and add streaming data source
+                var testData = MultipartDownloadTestHelpers.GenerateTestData(512, 0);
+                var response = new GetObjectResponse
+                {
+                    ContentLength = 512,
+                    ResponseStream = new MemoryStream(testData)
+                };
+                var streamingSource = new StreamingDataSource(1, response);
+
+                // Act
+                manager.AddBuffer(streamingSource);
+
+                // Assert - Read should complete
+                int bytesRead = await readTask;
+                Assert.AreEqual(512, bytesRead);
+            }
+            finally
+            {
+                manager.Dispose();
+            }
+        }
+
+        #endregion
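The AddBuffer overload these tests exercise accepts any part data source, streaming or buffered. A minimal sketch of the interface shape the tests imply, inferred purely from usage (PartNumber, IsComplete, ReadAsync, disposal); the SDK's actual declaration may include more members:

// Hypothetical reconstruction of IPartDataSource from test usage only.
internal interface IPartDataSource : IDisposable
{
    int PartNumber { get; }
    bool IsComplete { get; }
    Task<int> ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken);
}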
+        #region ReadAsync Tests - StreamingDataSource Integration
+
+        [TestMethod]
+        public async Task ReadAsync_FromStreamingDataSource_ReadsCorrectly()
+        {
+            // Arrange
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var manager = new PartBufferManager(config);
+
+            try
+            {
+                // Create streaming data source
+                var testData = MultipartDownloadTestHelpers.GenerateTestData(1000, 0);
+                var response = new GetObjectResponse
+                {
+                    ContentLength = 1000,
+                    ResponseStream = new MemoryStream(testData)
+                };
+                var streamingSource = new StreamingDataSource(1, response);
+                manager.AddBuffer(streamingSource);
+
+                // Act - Read in multiple chunks
+                byte[] readBuffer = new byte[400];
+                int bytesRead1 = await manager.ReadAsync(readBuffer, 0, 400, CancellationToken.None);
+                int bytesRead2 = await manager.ReadAsync(readBuffer, 0, 400, CancellationToken.None);
+                int bytesRead3 = await manager.ReadAsync(readBuffer, 0, 200, CancellationToken.None);
+
+                // Assert
+                Assert.AreEqual(400, bytesRead1);
+                Assert.AreEqual(400, bytesRead2);
+                Assert.AreEqual(200, bytesRead3);
+                Assert.AreEqual(2, manager.NextExpectedPartNumber);
+            }
+            finally
+            {
+                manager.Dispose();
+            }
+        }
+
+        [TestMethod]
+        public async Task ReadAsync_FromMixedSources_ReadsSequentially()
+        {
+            // Arrange
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var manager = new PartBufferManager(config);
+
+            try
+            {
+                // Add streaming source for part 1
+                var testData1 = MultipartDownloadTestHelpers.GenerateTestData(500, 0);
+                var response1 = new GetObjectResponse
+                {
+                    ContentLength = 500,
+                    ResponseStream = new MemoryStream(testData1)
+                };
+                var streamingSource = new StreamingDataSource(1, response1);
+                manager.AddBuffer((IPartDataSource)streamingSource);
+
+                // Add buffered source for part 2
+                var testData2 = MultipartDownloadTestHelpers.GenerateTestData(500, 500);
+                byte[] testBuffer2 = ArrayPool<byte>.Shared.Rent(500);
+                Buffer.BlockCopy(testData2, 0, testBuffer2, 0, 500);
+                var partBuffer2 = new StreamPartBuffer(2, testBuffer2, 500);
+                manager.AddBuffer(partBuffer2);
+
+                // Act - Read across both parts
+                byte[] readBuffer = new byte[750];
+                int bytesRead = await manager.ReadAsync(readBuffer, 0, 750, CancellationToken.None);
+
+                // Assert
+                Assert.AreEqual(750, bytesRead);
+
+                // Verify first 500 bytes from streaming source
+                Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch(testData1, readBuffer, 0, 500));
+
+                // Verify next 250 bytes from buffered source
+                byte[] expectedData2 = new byte[250];
+                Array.Copy(testData2, 0, expectedData2, 0, 250);
+                Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch(expectedData2, readBuffer, 500, 250));
+            }
+            finally
+            {
+                manager.Dispose();
+            }
+        }
+
+        [TestMethod]
+        public async Task ReadAsync_StreamingDataSource_DisposesAfterCompletion()
+        {
+            // Arrange
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var manager = new PartBufferManager(config);
+
+            try
+            {
+                // Create streaming data source
+                var testData = MultipartDownloadTestHelpers.GenerateTestData(512, 0);
+                var response = new GetObjectResponse
+                {
+                    ContentLength = 512,
+                    ResponseStream = new MemoryStream(testData)
+                };
+                var streamingSource = new StreamingDataSource(1, response);
+                manager.AddBuffer(streamingSource);
+
+                // Act - Read all data
+                byte[] readBuffer = new byte[512];
+                await manager.ReadAsync(readBuffer, 0, 512, CancellationToken.None);
+
+                // Assert - StreamingDataSource should be disposed after reading
+                // This is verified internally by PartBufferManager
+                Assert.AreEqual(2, manager.NextExpectedPartNumber);
+            }
+            finally
+            {
+                manager.Dispose();
+            }
+        }
+
+        [TestMethod]
+        public async Task ReadAsync_MultipleStreamingSources_ReadsSequentially()
+        {
+            // Arrange
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var manager = new PartBufferManager(config);
+
+            try
+            {
+                // Add 3 streaming sources
+                for (int i = 1; i <= 3; i++)
+                {
+                    var testData = MultipartDownloadTestHelpers.GeneratePartSpecificData(300, i);
+                    var response = new GetObjectResponse
+                    {
+                        ContentLength = 300,
+                        ResponseStream = new MemoryStream(testData)
+                    };
+                    var streamingSource = new StreamingDataSource(i, response);
+                    manager.AddBuffer(streamingSource);
+                }
+
+                // Act - Read across all parts
+                byte[] readBuffer = new byte[900];
+                int bytesRead = await manager.ReadAsync(readBuffer, 0, 900, CancellationToken.None);
+
+                // Assert
+                Assert.AreEqual(900, bytesRead);
+                Assert.AreEqual(4, manager.NextExpectedPartNumber);
+            }
+            finally
+            {
+                manager.Dispose();
+            }
+        }
+
+        #endregion
+
         #region Disposal Tests
 
         [TestMethod]
@@ -1003,5 +1296,122 @@ public async Task Operations_AfterDispose_ThrowObjectDisposedException()
         }
 
         #endregion
+
+        #region Thread Safety Tests - Memory Visibility
+
+        [TestMethod]
+        public async Task NextExpectedPartNumber_ConcurrentReads_SeeConsistentValue()
+        {
+            // This test verifies that the volatile keyword on _nextExpectedPartNumber
+            // prevents memory visibility issues when multiple producer threads
+            // read the value while the consumer thread updates it.
+            //
+            // Without volatile, producer threads may see stale cached values,
+            // causing incorrect stream-vs-buffer decisions.
+            //
+            // The test simulates BufferedPartDataHandler.ProcessPartAsync's pattern:
+            // Multiple download threads checking "partNumber == NextExpectedPartNumber"
+            // while the consumer thread increments NextExpectedPartNumber.
+
+            // Arrange
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var manager = new PartBufferManager(config);
+
+            const int NumReaderThreads = 8;
+            const int NumIncrements = 100;
+
+            var readErrors = new System.Collections.Concurrent.ConcurrentBag<string>();
+            var startSignal = new ManualResetEventSlim(false);
+            var stopSignal = new ManualResetEventSlim(false);
+
+            try
+            {
+                // Start multiple reader threads that continuously read NextExpectedPartNumber
+                var readerTasks = new Task[NumReaderThreads];
+                for (int i = 0; i < NumReaderThreads; i++)
+                {
+                    int threadId = i;
+                    readerTasks[i] = Task.Run(() =>
+                    {
+                        // Wait for start signal
+                        startSignal.Wait();
+
+                        int lastSeenValue = 0;
+
+                        // Aggressively read the value until stopped
+                        while (!stopSignal.IsSet)
+                        {
+                            int currentValue = manager.NextExpectedPartNumber;
+
+                            // Verify we never see a value less than what we saw before
+                            // (This would indicate stale cached reads)
+                            if (currentValue < lastSeenValue)
+                            {
+                                readErrors.Add($"Thread {threadId} saw value go backwards: {lastSeenValue} -> {currentValue}");
+                            }
+
+                            lastSeenValue = currentValue;
+
+                            // Spin to create cache pressure
+                            Thread.SpinWait(10);
+                        }
+                    });
+                }
+
+                // Start all reader threads simultaneously
+                startSignal.Set();
+
+                // Give threads time to start reading
+                await Task.Delay(10);
+
+                // Simulate consumer thread incrementing NextExpectedPartNumber
+                // by adding and reading parts sequentially
+                for (int partNum = 1; partNum <= NumIncrements; partNum++)
+                {
+                    // Add part
+                    byte[] testBuffer = ArrayPool<byte>.Shared.Rent(100);
+                    var partBuffer = new StreamPartBuffer(partNum, testBuffer, 100);
+                    manager.AddBuffer(partBuffer);
+
+                    // Read part completely to trigger increment
+                    byte[] readBuffer = new byte[100];
+                    await manager.ReadAsync(readBuffer, 0, 100, CancellationToken.None);
+
+                    // NextExpectedPartNumber should now be partNum + 1
+
+                    // Small spin to create timing variance
+                    Thread.SpinWait(5);
+                }
+
+                // Stop reader threads
+                stopSignal.Set();
+
+                // Wait for all readers to finish
+                await Task.WhenAll(readerTasks);
+
+                // Assert - No reader should have seen inconsistent values
+                if (readErrors.Count > 0)
+                {
+                    var errorMessage = $"Memory visibility issues detected:\n{string.Join("\n", readErrors.Take(10))}";
+                    if (readErrors.Count > 10)
+                    {
+                        errorMessage += $"\n... and {readErrors.Count - 10} more errors";
+                    }
+                    Assert.Fail(errorMessage);
+                }
+
+                // Verify final value is correct
+                Assert.AreEqual(NumIncrements + 1, manager.NextExpectedPartNumber);
+            }
+            finally
+            {
+                stopSignal.Set(); // Ensure threads stop even on failure
+                manager.Dispose();
+                startSignal.Dispose();
+                stopSignal.Dispose();
+            }
+        }
+
+        #endregion
     }
 }
try + { + // Act - Read in chunks + byte[] buffer = new byte[400]; + await dataSource.ReadAsync(buffer, 0, 400, CancellationToken.None); + await dataSource.ReadAsync(buffer, 0, 400, CancellationToken.None); + await dataSource.ReadAsync(buffer, 0, 200, CancellationToken.None); + + // Assert + Assert.IsTrue(dataSource.IsComplete); + } + finally + { + dataSource.Dispose(); + } + } + + #endregion + + #region ReadAsync Tests - Basic Functionality + + [TestMethod] + public async Task ReadAsync_ReadsDataFromResponseStream() + { + // Arrange + var testData = MultipartDownloadTestHelpers.GenerateTestData(512, 0); + var response = CreateMockGetObjectResponse(512, testData); + var dataSource = new StreamingDataSource(1, response); + + try + { + // Act + byte[] buffer = new byte[512]; + int bytesRead = await dataSource.ReadAsync(buffer, 0, 512, CancellationToken.None); + + // Assert + Assert.AreEqual(512, bytesRead); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch(testData, buffer, 0, 512)); + } + finally + { + dataSource.Dispose(); + } + } + + [TestMethod] + public async Task ReadAsync_SupportsPartialReads() + { + // Arrange + var testData = MultipartDownloadTestHelpers.GenerateTestData(1000, 0); + var response = CreateMockGetObjectResponse(1000, testData); + var dataSource = new StreamingDataSource(1, response); + + try + { + // Act - Read first 300 bytes + byte[] buffer = new byte[300]; + int bytesRead = await dataSource.ReadAsync(buffer, 0, 300, CancellationToken.None); + + // Assert + Assert.AreEqual(300, bytesRead); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch(testData, buffer, 0, 300)); + Assert.IsFalse(dataSource.IsComplete); + } + finally + { + dataSource.Dispose(); + } + } + + [TestMethod] + public async Task ReadAsync_SupportsMultipleSequentialReads() + { + // Arrange + var testData = MultipartDownloadTestHelpers.GenerateTestData(1000, 0); + var response = CreateMockGetObjectResponse(1000, testData); + var dataSource = new StreamingDataSource(1, response); + + try + { + // Act - Read in chunks + byte[] buffer1 = new byte[400]; + int bytesRead1 = await dataSource.ReadAsync(buffer1, 0, 400, CancellationToken.None); + + byte[] buffer2 = new byte[400]; + int bytesRead2 = await dataSource.ReadAsync(buffer2, 0, 400, CancellationToken.None); + + byte[] buffer3 = new byte[200]; + int bytesRead3 = await dataSource.ReadAsync(buffer3, 0, 200, CancellationToken.None); + + // Assert + Assert.AreEqual(400, bytesRead1); + Assert.AreEqual(400, bytesRead2); + Assert.AreEqual(200, bytesRead3); + Assert.IsTrue(dataSource.IsComplete); + + // Verify data correctness + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch( + testData, buffer1, 0, 400)); + + byte[] expectedData2 = new byte[400]; + Array.Copy(testData, 400, expectedData2, 0, 400); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch( + expectedData2, buffer2, 0, 400)); + + byte[] expectedData3 = new byte[200]; + Array.Copy(testData, 800, expectedData3, 0, 200); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch( + expectedData3, buffer3, 0, 200)); + } + finally + { + dataSource.Dispose(); + } + } + + [TestMethod] + public async Task ReadAsync_WithOffset_ReadsIntoBufferCorrectly() + { + // Arrange + var testData = MultipartDownloadTestHelpers.GenerateTestData(300, 0); + var response = CreateMockGetObjectResponse(300, testData); + var dataSource = new StreamingDataSource(1, response); + + try + { + // Act - Read into buffer with offset + byte[] buffer = new byte[500]; + int bytesRead = await 
dataSource.ReadAsync(buffer, 100, 300, CancellationToken.None); + + // Assert + Assert.AreEqual(300, bytesRead); + + // Verify data was written at correct offset + for (int i = 0; i < 300; i++) + { + Assert.AreEqual(testData[i], buffer[100 + i]); + } + } + finally + { + dataSource.Dispose(); + } + } + + #endregion + + #region ReadAsync Tests - Parameter Validation + + [TestMethod] + [ExpectedException(typeof(ArgumentNullException))] + public async Task ReadAsync_WithNullBuffer_ThrowsArgumentNullException() + { + // Arrange + var response = CreateMockGetObjectResponse(512); + var dataSource = new StreamingDataSource(1, response); + + try + { + // Act + await dataSource.ReadAsync(null, 0, 512, CancellationToken.None); + + // Assert - ExpectedException + } + finally + { + dataSource.Dispose(); + } + } + + [TestMethod] + [ExpectedException(typeof(ArgumentOutOfRangeException))] + public async Task ReadAsync_WithNegativeOffset_ThrowsArgumentOutOfRangeException() + { + // Arrange + var response = CreateMockGetObjectResponse(512); + var dataSource = new StreamingDataSource(1, response); + byte[] buffer = new byte[512]; + + try + { + // Act + await dataSource.ReadAsync(buffer, -1, 512, CancellationToken.None); + + // Assert - ExpectedException + } + finally + { + dataSource.Dispose(); + } + } + + [TestMethod] + [ExpectedException(typeof(ArgumentOutOfRangeException))] + public async Task ReadAsync_WithNegativeCount_ThrowsArgumentOutOfRangeException() + { + // Arrange + var response = CreateMockGetObjectResponse(512); + var dataSource = new StreamingDataSource(1, response); + byte[] buffer = new byte[512]; + + try + { + // Act + await dataSource.ReadAsync(buffer, 0, -1, CancellationToken.None); + + // Assert - ExpectedException + } + finally + { + dataSource.Dispose(); + } + } + + [TestMethod] + [ExpectedException(typeof(ArgumentException))] + public async Task ReadAsync_WithOffsetCountExceedingBounds_ThrowsArgumentException() + { + // Arrange + var response = CreateMockGetObjectResponse(512); + var dataSource = new StreamingDataSource(1, response); + byte[] buffer = new byte[512]; + + try + { + // Act - offset + count exceeds buffer length + await dataSource.ReadAsync(buffer, 400, 200, CancellationToken.None); + + // Assert - ExpectedException + } + finally + { + dataSource.Dispose(); + } + } + + #endregion + + #region ReadAsync Tests - Completion Detection + + [TestMethod] + public async Task ReadAsync_ReturnsZero_WhenStreamExhausted() + { + // Arrange + var testData = MultipartDownloadTestHelpers.GenerateTestData(100, 0); + var response = CreateMockGetObjectResponse(100, testData); + var dataSource = new StreamingDataSource(1, response); + + try + { + // Act - Read all data + byte[] buffer1 = new byte[100]; + int bytesRead1 = await dataSource.ReadAsync(buffer1, 0, 100, CancellationToken.None); + + // Try to read more + byte[] buffer2 = new byte[100]; + int bytesRead2 = await dataSource.ReadAsync(buffer2, 0, 100, CancellationToken.None); + + // Assert + Assert.AreEqual(100, bytesRead1); + Assert.AreEqual(0, bytesRead2); + Assert.IsTrue(dataSource.IsComplete); + } + finally + { + dataSource.Dispose(); + } + } + + [TestMethod] + public async Task ReadAsync_AfterComplete_ReturnsZero() + { + // Arrange + var testData = MultipartDownloadTestHelpers.GenerateTestData(512, 0); + var response = CreateMockGetObjectResponse(512, testData); + var dataSource = new StreamingDataSource(1, response); + + try + { + // Act - Read all data to completion + byte[] buffer1 = new byte[512]; + await 
dataSource.ReadAsync(buffer1, 0, 512, CancellationToken.None); + + Assert.IsTrue(dataSource.IsComplete); + + // Try to read again after completion + byte[] buffer2 = new byte[100]; + int bytesRead = await dataSource.ReadAsync(buffer2, 0, 100, CancellationToken.None); + + // Assert + Assert.AreEqual(0, bytesRead); + } + finally + { + dataSource.Dispose(); + } + } + + [TestMethod] + public async Task ReadAsync_MarksComplete_WhenExpectedBytesReached() + { + // Arrange - Create response with specific ContentLength + var testData = MultipartDownloadTestHelpers.GenerateTestData(1000, 0); + var response = CreateMockGetObjectResponse(1000, testData); + var dataSource = new StreamingDataSource(1, response); + + try + { + // Act - Read exactly expected bytes + byte[] buffer = new byte[1000]; + int bytesRead = await dataSource.ReadAsync(buffer, 0, 1000, CancellationToken.None); + + // Assert + Assert.AreEqual(1000, bytesRead); + Assert.IsTrue(dataSource.IsComplete); + } + finally + { + dataSource.Dispose(); + } + } + + #endregion + + #region ReadAsync Tests - Progress Tracking + + [TestMethod] + public async Task ReadAsync_TracksProgressCorrectly() + { + // Arrange + var testData = MultipartDownloadTestHelpers.GenerateTestData(1000, 0); + var response = CreateMockGetObjectResponse(1000, testData); + var dataSource = new StreamingDataSource(1, response); + + try + { + // Act & Assert - Track progress through multiple reads + Assert.IsFalse(dataSource.IsComplete); + + byte[] buffer = new byte[300]; + await dataSource.ReadAsync(buffer, 0, 300, CancellationToken.None); + Assert.IsFalse(dataSource.IsComplete); // 300/1000 + + await dataSource.ReadAsync(buffer, 0, 300, CancellationToken.None); + Assert.IsFalse(dataSource.IsComplete); // 600/1000 + + await dataSource.ReadAsync(buffer, 0, 300, CancellationToken.None); + Assert.IsFalse(dataSource.IsComplete); // 900/1000 + + await dataSource.ReadAsync(buffer, 0, 100, CancellationToken.None); + Assert.IsTrue(dataSource.IsComplete); // 1000/1000 + } + finally + { + dataSource.Dispose(); + } + } + + #endregion + + #region ReadAsync Tests - Error Handling + + [TestMethod] + public async Task ReadAsync_OnStreamError_MarksComplete() + { + // Arrange - Create a response with a stream that throws + var errorStream = new FaultyStream(new IOException("Stream read error")); + var response = new GetObjectResponse + { + ContentLength = 512, + ResponseStream = errorStream + }; + var dataSource = new StreamingDataSource(1, response); + + try + { + // Act & Assert + byte[] buffer = new byte[512]; + await Assert.ThrowsExceptionAsync(async () => + { + await dataSource.ReadAsync(buffer, 0, 512, CancellationToken.None); + }); + + // Should mark as complete on error + Assert.IsTrue(dataSource.IsComplete); + } + finally + { + dataSource.Dispose(); + } + } + + [TestMethod] + public async Task ReadAsync_PropagatesStreamExceptions() + { + // Arrange + var errorStream = new FaultyStream(new InvalidOperationException("Test error")); + var response = new GetObjectResponse + { + ContentLength = 512, + ResponseStream = errorStream + }; + var dataSource = new StreamingDataSource(1, response); + + try + { + // Act & Assert + byte[] buffer = new byte[512]; + await Assert.ThrowsExceptionAsync(async () => + { + await dataSource.ReadAsync(buffer, 0, 512, CancellationToken.None); + }); + } + finally + { + dataSource.Dispose(); + } + } + + #endregion + + #region Disposal Tests + + [TestMethod] + public void Dispose_ReleasesResponse() + { + // Arrange + var response = 
CreateMockGetObjectResponse(512); + var dataSource = new StreamingDataSource(1, response); + + // Act + dataSource.Dispose(); + + // Assert - Response stream should be disposed + // After disposal, stream is either null or no longer readable + Assert.IsTrue(response.ResponseStream == null || !response.ResponseStream.CanRead); + } + + [TestMethod] + public void Dispose_MultipleCalls_IsIdempotent() + { + // Arrange + var response = CreateMockGetObjectResponse(512); + var dataSource = new StreamingDataSource(1, response); + + // Act - Dispose multiple times + dataSource.Dispose(); + dataSource.Dispose(); + dataSource.Dispose(); + + // Assert - Should not throw + } + + [TestMethod] + [ExpectedException(typeof(ObjectDisposedException))] + public async Task ReadAsync_AfterDispose_ThrowsObjectDisposedException() + { + // Arrange + var response = CreateMockGetObjectResponse(512); + var dataSource = new StreamingDataSource(1, response); + dataSource.Dispose(); + + // Act + byte[] buffer = new byte[512]; + await dataSource.ReadAsync(buffer, 0, 512, CancellationToken.None); + + // Assert - ExpectedException + } + + [TestMethod] + [ExpectedException(typeof(ObjectDisposedException))] + public void PartNumber_AfterDispose_ThrowsObjectDisposedException() + { + // Arrange + var response = CreateMockGetObjectResponse(512); + var dataSource = new StreamingDataSource(1, response); + dataSource.Dispose(); + + // Act + var partNumber = dataSource.PartNumber; + + // Assert - ExpectedException + } + + [TestMethod] + [ExpectedException(typeof(ObjectDisposedException))] + public void IsComplete_AfterDispose_ThrowsObjectDisposedException() + { + // Arrange + var response = CreateMockGetObjectResponse(512); + var dataSource = new StreamingDataSource(1, response); + dataSource.Dispose(); + + // Act + var isComplete = dataSource.IsComplete; + + // Assert - ExpectedException + } + + #endregion + + #region Helper Methods + + /// + /// Creates a mock GetObjectResponse with test data. + /// + private GetObjectResponse CreateMockGetObjectResponse(long contentLength, byte[] testData = null) + { + if (testData == null) + { + testData = MultipartDownloadTestHelpers.GenerateTestData((int)contentLength, 0); + } + + return new GetObjectResponse + { + ContentLength = contentLength, + ResponseStream = new MemoryStream(testData), + ETag = "test-etag" + }; + } + + /// + /// Stream that throws exceptions for testing error handling. 
+ /// + private class FaultyStream : Stream + { + private readonly Exception _exception; + + public FaultyStream(Exception exception) + { + _exception = exception; + } + + public override bool CanRead => true; + public override bool CanSeek => false; + public override bool CanWrite => false; + public override long Length => throw new NotSupportedException(); + public override long Position + { + get => throw new NotSupportedException(); + set => throw new NotSupportedException(); + } + + public override void Flush() { } + + public override int Read(byte[] buffer, int offset, int count) + { + throw _exception; + } + + public override Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) + { + throw _exception; + } + + public override long Seek(long offset, SeekOrigin origin) + { + throw new NotSupportedException(); + } + + public override void SetLength(long value) + { + throw new NotSupportedException(); + } + + public override void Write(byte[] buffer, int offset, int count) + { + throw new NotSupportedException(); + } + } + + #endregion + } +} From ba5b9eae4bab9c46bbe4b1c6ca76f425f3592980 Mon Sep 17 00:00:00 2001 From: Garrett Beatty Date: Mon, 1 Dec 2025 17:34:40 -0500 Subject: [PATCH 32/56] Update initiated, complete and failed events to not fire in background (#4170) --- .../S3/Custom/Transfer/TransferUtilityDownloadRequest.cs | 6 +++--- .../S3/Custom/Transfer/TransferUtilityUploadRequest.cs | 6 +++--- .../Services/S3/IntegrationTests/TransferUtilityTests.cs | 6 +----- 3 files changed, 7 insertions(+), 11 deletions(-) diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadRequest.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadRequest.cs index f7ba5f97b943..95db655a02f7 100644 --- a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadRequest.cs +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadRequest.cs @@ -193,7 +193,7 @@ internal void OnRaiseProgressEvent(WriteObjectProgressArgs progressArgs) /// DownloadInitiatedEventArgs args internal void OnRaiseTransferInitiatedEvent(DownloadInitiatedEventArgs args) { - AWSSDKUtils.InvokeInBackground(DownloadInitiatedEvent, args, this); + DownloadInitiatedEvent?.Invoke(this, args); } /// @@ -202,7 +202,7 @@ internal void OnRaiseTransferInitiatedEvent(DownloadInitiatedEventArgs args) /// DownloadCompletedEventArgs args internal void OnRaiseTransferCompletedEvent(DownloadCompletedEventArgs args) { - AWSSDKUtils.InvokeInBackground(DownloadCompletedEvent, args, this); + DownloadCompletedEvent?.Invoke(this, args); } /// @@ -211,7 +211,7 @@ internal void OnRaiseTransferCompletedEvent(DownloadCompletedEventArgs args) /// DownloadFailedEventArgs args internal void OnRaiseTransferFailedEvent(DownloadFailedEventArgs args) { - AWSSDKUtils.InvokeInBackground(DownloadFailedEvent, args, this); + DownloadFailedEvent?.Invoke(this, args); } } diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadRequest.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadRequest.cs index 7e54dc52d5d5..879b4395849d 100644 --- a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadRequest.cs +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadRequest.cs @@ -276,7 +276,7 @@ internal bool IsSetPartSize() /// UploadInitiatedEventArgs args internal void OnRaiseTransferInitiatedEvent(UploadInitiatedEventArgs args) { - AWSSDKUtils.InvokeInBackground(UploadInitiatedEvent, args, this); + UploadInitiatedEvent?.Invoke(this, args); } /// @@ -285,7 +285,7 @@ 
         /// <param name="args">UploadCompletedEventArgs args</param>
         internal void OnRaiseTransferCompletedEvent(UploadCompletedEventArgs args)
         {
-            AWSSDKUtils.InvokeInBackground(UploadCompletedEvent, args, this);
+            UploadCompletedEvent?.Invoke(this, args);
         }
 
         /// <summary>
@@ -294,7 +294,7 @@ internal void OnRaiseTransferCompletedEvent(UploadCompletedEventArgs args)
         /// <param name="args">UploadFailedEventArgs args</param>
         internal void OnRaiseTransferFailedEvent(UploadFailedEventArgs args)
         {
-            AWSSDKUtils.InvokeInBackground(UploadFailedEvent, args, this);
+            UploadFailedEvent?.Invoke(this, args);
         }
 
 
diff --git a/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs b/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs
index fda91a96e6a7..bc42edaba5e3 100644
--- a/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs
+++ b/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs
@@ -2367,11 +2367,7 @@ public void AssertEventFired()
             if (EventException != null)
                 throw EventException;
 
-            // Since AWSSDKUtils.InvokeInBackground fires the event in the background it is possible that we check too early that the event has fired. In this case, we sleep and check again.
-            for (int retries = 1; retries < 5 && !EventFired; retries++)
-            {
-                Thread.Sleep(1000 * retries);
-            }
+            // Since events are now fired synchronously, we can check immediately without retries
 
             Assert.IsTrue(EventFired, $"{typeof(T).Name} event was not fired");
         }
     }
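With the InvokeInBackground calls removed, subscribers now run synchronously on the thread that raises the event. A minimal usage sketch, assuming the initiated/completed/failed events are public members of the request (the bucket name, key, and file path below are placeholders):

var uploadRequest = new TransferUtilityUploadRequest
{
    BucketName = "amzn-s3-demo-bucket",
    Key = "example-object",
    FilePath = @"C:\temp\example-object"
};

// Handlers now block the transfer path, so keep them short.
uploadRequest.UploadInitiatedEvent += (sender, args) => Console.WriteLine("upload initiated");
uploadRequest.UploadCompletedEvent += (sender, args) => Console.WriteLine("upload completed");
uploadRequest.UploadFailedEvent += (sender, args) => Console.WriteLine("upload failed");

await new TransferUtility(s3Client).UploadAsync(uploadRequest);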
From ce852a20f2bff5277a02c39ed4dc7c1be6f8f05f Mon Sep 17 00:00:00 2001
From: Garrett Beatty
Date: Mon, 1 Dec 2025 17:37:07 -0500
Subject: [PATCH 33/56] Move MaxInMemoryParts to request object (#4163)

---
 .../19ed68ce-9f46-4e1e-a0ff-45a2b3641946.json |   2 +-
 .../Internal/BufferedMultipartStream.cs       |   2 +-
 .../OpenStreamWithResponseCommand.async.cs    |   2 +-
 .../Custom/Transfer/TransferUtilityConfig.cs  |  17 --
 .../TransferUtilityOpenStreamRequest.cs       |  16 +-
 .../Transfer/_async/ITransferUtility.async.cs |  20 +-
 .../_bcl+netstandard/ITransferUtility.sync.cs |  20 +-
 .../TransferUtilityOpenStreamTests.cs         | 272 ++++++++++++++++++
 .../Custom/BufferedMultipartStreamTests.cs    | 115 ++++++++
 .../Custom/MultipartDownloadTestHelpers.cs    |   8 +-
 .../OpenStreamWithResponseCommandTests.cs     | 137 +++++++++
 11 files changed, 561 insertions(+), 50 deletions(-)

diff --git a/generator/.DevConfigs/19ed68ce-9f46-4e1e-a0ff-45a2b3641946.json b/generator/.DevConfigs/19ed68ce-9f46-4e1e-a0ff-45a2b3641946.json
index 08f9ff96ce53..4a4e87779a51 100644
--- a/generator/.DevConfigs/19ed68ce-9f46-4e1e-a0ff-45a2b3641946.json
+++ b/generator/.DevConfigs/19ed68ce-9f46-4e1e-a0ff-45a2b3641946.json
@@ -4,7 +4,7 @@
       "serviceName": "S3",
       "type": "patch",
       "changeLogMessages": [
-        "Added MaxInMemoryParts property to TransferUtilityConfig for controlling memory usage during multipart downloads",
+        "Added MaxInMemoryParts property to TransferUtilityOpenStreamRequest for controlling memory usage during multipart downloads",
         "Added PartSize property to BaseDownloadRequest for configuring multipart download part sizes",
         "Added MultipartDownloadType enum and property to BaseDownloadRequest for selecting download strategy"
       ]
diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedMultipartStream.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedMultipartStream.cs
index 1f9f2aef5688..991793112657 100644
--- a/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedMultipartStream.cs
+++ b/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedMultipartStream.cs
@@ -91,7 +91,7 @@ public static BufferedMultipartStream Create(IAmazonS3 s3Client, TransferUtility
 
             var config = new BufferedDownloadConfiguration(
                 transferConfig.ConcurrentServiceRequests,
-                transferConfig.MaxInMemoryParts,
+                request.MaxInMemoryParts,
                 s3Client.Config.BufferSize,
                 targetPartSize);
 
diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/OpenStreamWithResponseCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/OpenStreamWithResponseCommand.async.cs
index 73e94061ccd3..8a72d6b87164 100644
--- a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/OpenStreamWithResponseCommand.async.cs
+++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/OpenStreamWithResponseCommand.async.cs
@@ -39,7 +39,7 @@ public override async Task ExecuteAsync(Cance
             Logger.DebugFormat("OpenStreamWithResponseCommand: Configuration - ConcurrentServiceRequests={0}, MaxInMemoryParts={1}, BufferSize={2}",
                 _config.ConcurrentServiceRequests,
-                _config.MaxInMemoryParts,
+                _request.MaxInMemoryParts,
                 _s3Client.Config.BufferSize
             );
 
diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityConfig.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityConfig.cs
index 993d671263fe..c652bd4a5a36 100644
--- a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityConfig.cs
+++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityConfig.cs
@@ -42,7 +42,6 @@ public partial class TransferUtilityConfig
     {
         long _minSizeBeforePartUpload = 16 * (long)Math.Pow(2, 20);
         int _concurrentServiceRequests;
-        int _maxInMemoryParts = 1024; // When combined with the default part size of 8MB, we get 8GB of memory being utilized as the default.
 
         /// <summary>
         /// Default constructor.
@@ -82,21 +81,5 @@ public int ConcurrentServiceRequests
                 this._concurrentServiceRequests = value;
             }
         }
-
-        /// <summary>
-        /// Gets or sets the maximum number of parts to buffer in memory during multipart downloads.
-        /// The default value is 1024.
-        /// </summary>
-        public int MaxInMemoryParts
-        {
-            get { return this._maxInMemoryParts; }
-            set
-            {
-                if (value < 1)
-                    value = 1;
-
-                this._maxInMemoryParts = value;
-            }
-        }
     }
 }
diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityOpenStreamRequest.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityOpenStreamRequest.cs
index 98255c63625c..ceeb5fa8e4b8 100644
--- a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityOpenStreamRequest.cs
+++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityOpenStreamRequest.cs
@@ -32,7 +32,21 @@ namespace Amazon.S3.Transfer
     /// </summary>
     public class TransferUtilityOpenStreamRequest : BaseDownloadRequest
     {
+        private int _maxInMemoryParts = 1024;
 
-
+        /// <summary>
+        /// Gets or sets the maximum number of parts to buffer in memory during multipart downloads.
+        /// The default value is 1024.
+        /// </summary>
+        /// <remarks>
+        /// This property controls memory usage during streaming downloads. When combined with the
+        /// default part size of 8MB, the default value of 1024 parts allows up to 8GB of memory usage.
+        /// Adjust this value based on your application's memory constraints and performance requirements.
+        /// </remarks>
+        public int MaxInMemoryParts
+        {
+            get { return this._maxInMemoryParts; }
+            set { this._maxInMemoryParts = value; }
+        }
     }
 }
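Since the worst-case buffered memory for a streaming download is roughly MaxInMemoryParts multiplied by the part size, relocating the property lets each request set its own ceiling. A minimal sketch (the bucket name and key are placeholders, and transferUtility is assumed to be an existing TransferUtility instance):

var openStreamRequest = new TransferUtilityOpenStreamRequest
{
    BucketName = "amzn-s3-demo-bucket",
    Key = "large-object",
    PartSize = 8 * 1024 * 1024,   // 8MB parts
    MaxInMemoryParts = 64         // caps buffering near 512MB instead of the 8GB default
};

using (var response = await transferUtility.OpenStreamWithResponseAsync(openStreamRequest))
{
    // Consume response.ResponseStream; back-pressure keeps at most
    // MaxInMemoryParts parts buffered while you read.
}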
diff --git a/sdk/src/Services/S3/Custom/Transfer/_async/ITransferUtility.async.cs b/sdk/src/Services/S3/Custom/Transfer/_async/ITransferUtility.async.cs
index 74bcc619b7f4..ac041a5e4956 100644
--- a/sdk/src/Services/S3/Custom/Transfer/_async/ITransferUtility.async.cs
+++ b/sdk/src/Services/S3/Custom/Transfer/_async/ITransferUtility.async.cs
@@ -543,22 +543,18 @@ public partial interface ITransferUtility : IDisposable
         ///     var config = new TransferUtilityConfig
         ///     {
         ///         // Control how many parts download in parallel (default: 10)
-        ///         ConcurrentServiceRequests = 20,
-        ///
-        ///         // Limit memory usage by capping buffered parts (default: 1024)
-        ///         // With 8MB parts, 1024 parts = 8GB max memory
-        ///         MaxInMemoryParts = 512
+        ///         ConcurrentServiceRequests = 20
         ///     };
         ///     var transferUtility = new TransferUtility(s3Client, config);
         ///
         ///
         /// Use <see cref="TransferUtilityConfig.ConcurrentServiceRequests"/> to control parallel download threads.
-        /// Use <see cref="TransferUtilityConfig.MaxInMemoryParts"/> to limit memory consumption by capping the number
+        /// Use <see cref="TransferUtilityOpenStreamRequest.MaxInMemoryParts"/> to limit memory consumption by capping the number
         /// of buffered parts in memory.
         ///
         ///
         /// Memory Considerations: The buffering mechanism uses memory to store downloaded parts.
-        /// Adjust <see cref="TransferUtilityConfig.MaxInMemoryParts"/> if you need to limit memory usage,
+        /// Adjust <see cref="TransferUtilityOpenStreamRequest.MaxInMemoryParts"/> if you need to limit memory usage,
         /// especially when downloading very large files or multiple files concurrently.
         ///
         ///
@@ -602,17 +598,13 @@ public partial interface ITransferUtility : IDisposable
         ///     var config = new TransferUtilityConfig
         ///     {
         ///         // Control how many parts download in parallel (default: 10)
-        ///         ConcurrentServiceRequests = 20,
-        ///
-        ///         // Limit memory usage by capping buffered parts (default: 1024)
-        ///         // With 8MB parts, 1024 parts = 8GB max memory
-        ///         MaxInMemoryParts = 512
+        ///         ConcurrentServiceRequests = 20
         ///     };
         ///     var transferUtility = new TransferUtility(s3Client, config);
         ///
         ///
         /// Use <see cref="TransferUtilityConfig.ConcurrentServiceRequests"/> to control parallel download threads.
-        /// Use <see cref="TransferUtilityConfig.MaxInMemoryParts"/> to limit memory consumption by capping the number
+        /// Use <see cref="TransferUtilityOpenStreamRequest.MaxInMemoryParts"/> to limit memory consumption by capping the number
         /// of buffered parts in memory.
         ///
         ///
@@ -629,7 +621,7 @@ public partial interface ITransferUtility : IDisposable
         ///
         ///
         /// Memory Considerations: The buffering mechanism uses memory to store downloaded parts.
-        /// Adjust <see cref="TransferUtilityConfig.MaxInMemoryParts"/> if you need to limit memory usage,
+        /// Adjust <see cref="TransferUtilityOpenStreamRequest.MaxInMemoryParts"/> if you need to limit memory usage,
         /// especially when downloading very large files or multiple files concurrently.
         ///
         ///
diff --git a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.sync.cs b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.sync.cs
index eeb52f04050c..979fc54daf9f 100644
--- a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.sync.cs
+++ b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.sync.cs
@@ -290,22 +290,18 @@ public partial interface ITransferUtility
         ///     var config = new TransferUtilityConfig
         ///     {
         ///         // Control how many parts download in parallel (default: 10)
-        ///         ConcurrentServiceRequests = 20,
-        ///
-        ///         // Limit memory usage by capping buffered parts (default: 1024)
-        ///         // With 8MB parts, 1024 parts = 8GB max memory
-        ///         MaxInMemoryParts = 512
+        ///         ConcurrentServiceRequests = 20
         ///     };
         ///     var transferUtility = new TransferUtility(s3Client, config);
         ///
         ///
         /// Use <see cref="TransferUtilityConfig.ConcurrentServiceRequests"/> to control parallel download threads.
-        /// Use <see cref="TransferUtilityConfig.MaxInMemoryParts"/> to limit memory consumption by capping the number
+        /// Use <see cref="TransferUtilityOpenStreamRequest.MaxInMemoryParts"/> to limit memory consumption by capping the number
         /// of buffered parts in memory.
         ///
         ///
         /// Memory Considerations: The buffering mechanism uses memory to store downloaded parts.
-        /// Adjust <see cref="TransferUtilityConfig.MaxInMemoryParts"/> if you need to limit memory usage,
+        /// Adjust <see cref="TransferUtilityOpenStreamRequest.MaxInMemoryParts"/> if you need to limit memory usage,
         /// especially when downloading very large files or multiple files concurrently.
         ///
         ///
@@ -348,17 +344,13 @@ public partial interface ITransferUtility
         ///     var config = new TransferUtilityConfig
         ///     {
         ///         // Control how many parts download in parallel (default: 10)
-        ///         ConcurrentServiceRequests = 20,
-        ///
-        ///         // Limit memory usage by capping buffered parts (default: 1024)
-        ///         // With 8MB parts, 1024 parts = 8GB max memory
-        ///         MaxInMemoryParts = 512
+        ///         ConcurrentServiceRequests = 20
         ///     };
         ///     var transferUtility = new TransferUtility(s3Client, config);
         ///
         ///
         /// Use <see cref="TransferUtilityConfig.ConcurrentServiceRequests"/> to control parallel download threads.
-        /// Use <see cref="TransferUtilityConfig.MaxInMemoryParts"/> to limit memory consumption by capping the number
+        /// Use <see cref="TransferUtilityOpenStreamRequest.MaxInMemoryParts"/> to limit memory consumption by capping the number
         /// of buffered parts in memory.
         ///
         ///
@@ -375,7 +367,7 @@ public partial interface ITransferUtility
         ///
         ///
         /// Memory Considerations: The buffering mechanism uses memory to store downloaded parts.
-        /// Adjust <see cref="TransferUtilityConfig.MaxInMemoryParts"/> if you need to limit memory usage,
+        /// Adjust <see cref="TransferUtilityOpenStreamRequest.MaxInMemoryParts"/> if you need to limit memory usage,
         /// especially when downloading very large files or multiple files concurrently.
         ///
         ///
diff --git a/sdk/test/Services/S3/IntegrationTests/TransferUtilityOpenStreamTests.cs b/sdk/test/Services/S3/IntegrationTests/TransferUtilityOpenStreamTests.cs
index b78f19ecacb3..f463c4f7da57 100644
--- a/sdk/test/Services/S3/IntegrationTests/TransferUtilityOpenStreamTests.cs
+++ b/sdk/test/Services/S3/IntegrationTests/TransferUtilityOpenStreamTests.cs
@@ -390,6 +390,278 @@ await Client.PutObjectAsync(new PutObjectRequest
 
         #endregion
 
+        #region MaxInMemoryParts Tests
+
+        [TestMethod]
+        [TestCategory("S3")]
+        [TestCategory("OpenStream")]
+        [TestCategory("MaxInMemoryParts")]
+        [TestCategory("Multipart")]
+        public async Task OpenStream_WithCustomMaxInMemoryParts_DownloadsSuccessfully()
+        {
+            // Arrange - Upload as multipart to test MaxInMemoryParts buffering
+            var objectSize = 32 * MB;
+            var uploadPartSize = 8 * MB; // Force multipart upload with 4 parts
+            var downloadPartSize = 8 * MB;
+            var maxInMemoryParts = 2; // Only buffer 2 parts in memory at once
+            var key = UtilityMethods.GenerateName("maxinmemory-test");
+            var filePath = Path.Combine(Path.GetTempPath(), key);
+            UtilityMethods.GenerateFile(filePath, objectSize);
+
+            // Calculate checksum before upload
+            var expectedChecksum = CalculateFileChecksum(filePath);
+
+            // Upload using TransferUtility to ensure multipart upload
+            var uploadRequest = new TransferUtilityUploadRequest
+            {
+                BucketName = bucketName,
+                Key = key,
+                FilePath = filePath,
+                PartSize = uploadPartSize // Force multipart upload
+            };
+
+            var transferUtility = new TransferUtility(Client);
+            await transferUtility.UploadAsync(uploadRequest);
+
+            // Verify object is multipart
+            var metadata = await Client.GetObjectMetadataAsync(new GetObjectMetadataRequest
+            {
+                BucketName = bucketName,
+                Key = key,
+                PartNumber = 1
+            });
+            Assert.IsTrue(metadata.PartsCount > 1, "Object should be multipart to test MaxInMemoryParts");
+
+            var downloadRequest = new TransferUtilityOpenStreamRequest
+            {
+                BucketName = bucketName,
+                Key = key,
+                PartSize = downloadPartSize,
+                MaxInMemoryParts = maxInMemoryParts
+            };
+
+            // Act
+            using (var response = await transferUtility.OpenStreamWithResponseAsync(downloadRequest))
+            {
+                // Assert
+                Assert.IsNotNull(response, "Response should not be null");
+                Assert.IsNotNull(response.ResponseStream, "ResponseStream should not be null");
+                ValidateHeaders(response, objectSize);
+
+                var downloadedBytes = await ReadStreamToByteArray(response.ResponseStream, objectSize, (int)(2 * MB));
+                var actualChecksum = CalculateChecksum(downloadedBytes);
+
+                Assert.AreEqual(expectedChecksum, actualChecksum,
+                    "Downloaded data checksum should match with custom MaxInMemoryParts");
+                Assert.AreEqual(objectSize, downloadedBytes.Length,
+                    "Downloaded size should match with custom MaxInMemoryParts");
+            }
+        }
+
+        [TestMethod]
+        [TestCategory("S3")]
+        [TestCategory("OpenStream")]
+        [TestCategory("MaxInMemoryParts")]
+        [TestCategory("Multipart")]
+        public async Task OpenStream_WithDefaultMaxInMemoryParts_DownloadsSuccessfully()
+        {
+            // Arrange - Upload as multipart, download without specifying MaxInMemoryParts
+            var objectSize = 24 * MB;
+            var uploadPartSize = 8 * MB;
+            var downloadPartSize = 8 * MB;
+            var key = UtilityMethods.GenerateName("default-maxinmemory-test");
+            var filePath = Path.Combine(Path.GetTempPath(), key);
+            UtilityMethods.GenerateFile(filePath, objectSize);
+
+            // Calculate checksum before upload
+            var expectedChecksum = CalculateFileChecksum(filePath);
+
+            // Upload using TransferUtility to ensure multipart upload
+            var uploadRequest = new TransferUtilityUploadRequest
+            {
+                BucketName = bucketName,
+                Key = key,
+                FilePath = filePath,
+                PartSize = uploadPartSize
+            };
+
+            var transferUtility = new TransferUtility(Client);
+            await transferUtility.UploadAsync(uploadRequest);
+
+            // Verify object is multipart
+            var metadata = await Client.GetObjectMetadataAsync(new GetObjectMetadataRequest
+            {
+                BucketName = bucketName,
+                Key = key,
+                PartNumber = 1
+            });
+            Assert.IsTrue(metadata.PartsCount > 1, "Object should be multipart");
+
+            var downloadRequest = new TransferUtilityOpenStreamRequest
+            {
+                BucketName = bucketName,
+                Key = key,
+                PartSize = downloadPartSize
+                // MaxInMemoryParts not specified - should use default (1024)
+            };
+
+            // Act
+            using (var response = await transferUtility.OpenStreamWithResponseAsync(downloadRequest))
+            {
+                // Assert
+                Assert.IsNotNull(response);
+                Assert.IsNotNull(response.ResponseStream);
+                ValidateHeaders(response, objectSize);
+
+                var downloadedBytes = await ReadStreamToByteArray(response.ResponseStream, objectSize, (int)(2 * MB));
+                var actualChecksum = CalculateChecksum(downloadedBytes);
+
+                Assert.AreEqual(expectedChecksum, actualChecksum,
+                    "Downloaded data checksum should match with default MaxInMemoryParts");
+                Assert.AreEqual(objectSize, downloadedBytes.Length,
+                    "Downloaded size should match with default MaxInMemoryParts");
+            }
+        }
+
+        [DataTestMethod]
+        [TestCategory("S3")]
+        [TestCategory("OpenStream")]
+        [TestCategory("MaxInMemoryParts")]
+        [TestCategory("Multipart")]
+        [DataRow(1, DisplayName = "MaxInMemoryParts = 1 (minimal buffering)")]
+        [DataRow(2, DisplayName = "MaxInMemoryParts = 2")]
+        [DataRow(4, DisplayName = "MaxInMemoryParts = 4")]
+        [DataRow(10, DisplayName = "MaxInMemoryParts = 10")]
+        public async Task OpenStream_WithVariousMaxInMemoryParts_DownloadsSuccessfully(int maxInMemoryParts)
+        {
+            // Arrange - Upload as multipart, test various MaxInMemoryParts values
+            var objectSize = 24 * MB;
+            var uploadPartSize = 8 * MB; // Creates 3 parts
+            var downloadPartSize = 8 * MB;
+            var key = UtilityMethods.GenerateName($"maxinmemory-{maxInMemoryParts}-test");
+            var filePath = Path.Combine(Path.GetTempPath(), key);
+            UtilityMethods.GenerateFile(filePath, objectSize);
+
+            // Calculate checksum before upload
+            var expectedChecksum = CalculateFileChecksum(filePath);
+
+            // Upload using TransferUtility to ensure multipart upload
+            var uploadRequest = new TransferUtilityUploadRequest
+            {
+                BucketName = bucketName,
+                Key = key,
+                FilePath = filePath,
+                PartSize = uploadPartSize
+            };
+
+            var transferUtility = new TransferUtility(Client);
+            await transferUtility.UploadAsync(uploadRequest);
+
+            // Verify object is multipart
+            var metadata = await Client.GetObjectMetadataAsync(new GetObjectMetadataRequest
+            {
+                BucketName = bucketName,
+                Key = key,
+                PartNumber = 1
+            });
+            Assert.IsTrue(metadata.PartsCount > 1, "Object should be multipart");
+
+            var downloadRequest = new TransferUtilityOpenStreamRequest
+            {
+                BucketName = bucketName,
+                Key = key,
+                PartSize = downloadPartSize,
+                MaxInMemoryParts = maxInMemoryParts
+            };
+
+            // Act
+            using (var response = await transferUtility.OpenStreamWithResponseAsync(downloadRequest))
+            {
+                // Assert
+                Assert.IsNotNull(response, $"Response should not be null with MaxInMemoryParts={maxInMemoryParts}");
+                Assert.IsNotNull(response.ResponseStream,
+                    $"ResponseStream should not be null with MaxInMemoryParts={maxInMemoryParts}");
+
+                var downloadedBytes = await ReadStreamToByteArray(response.ResponseStream, objectSize, (int)(2 * MB));
+                var actualChecksum = CalculateChecksum(downloadedBytes);
+
+                Assert.AreEqual(expectedChecksum, actualChecksum,
+                    $"Downloaded data checksum should match with MaxInMemoryParts={maxInMemoryParts}");
+                Assert.AreEqual(objectSize, downloadedBytes.Length,
+                    $"Downloaded size should match with MaxInMemoryParts={maxInMemoryParts}");
+            }
+        }
+
+        [TestMethod]
+        [TestCategory("S3")]
+        [TestCategory("OpenStream")]
+        [TestCategory("MaxInMemoryParts")]
+        [TestCategory("Multipart")]
+        public async Task OpenStream_LargeObjectWithSmallMaxInMemoryParts_DownloadsSuccessfully()
+        {
+            // Arrange - Test memory-constrained scenario with large object
+            // This simulates downloading a large file while limiting memory usage
+            var objectSize = 40 * MB;
+            var uploadPartSize = 8 * MB; // Creates 5 parts
+            var downloadPartSize = 8 * MB;
+            var maxInMemoryParts = 2; // Only buffer 2 parts (16MB) instead of all 5 (40MB)
+            var key = UtilityMethods.GenerateName("large-maxinmemory-test");
+            var filePath = Path.Combine(Path.GetTempPath(), key);
+            UtilityMethods.GenerateFile(filePath, objectSize);
+
+            // Calculate checksum before upload
+            var expectedChecksum = CalculateFileChecksum(filePath);
+
+            // Upload using TransferUtility to ensure multipart upload
+            var uploadRequest = new TransferUtilityUploadRequest
+            {
+                BucketName = bucketName,
+                Key = key,
+                FilePath = filePath,
+                PartSize = uploadPartSize
+            };
+
+            var transferUtility = new TransferUtility(Client);
+            await transferUtility.UploadAsync(uploadRequest);
+
+            // Verify object is multipart
+            var metadata = await Client.GetObjectMetadataAsync(new GetObjectMetadataRequest
+            {
+                BucketName = bucketName,
+                Key = key,
+                PartNumber = 1
+            });
+            Assert.IsTrue(metadata.PartsCount > 1, "Object should be multipart");
+
+            var downloadRequest = new TransferUtilityOpenStreamRequest
+            {
+                BucketName = bucketName,
+                Key = key,
+                PartSize = downloadPartSize,
+                MaxInMemoryParts = maxInMemoryParts
+            };
+
+            // Act
+            using (var response = await transferUtility.OpenStreamWithResponseAsync(downloadRequest))
+            {
+                // Assert
+
Assert.IsNotNull(response); + Assert.IsNotNull(response.ResponseStream); + ValidateHeaders(response, objectSize); + + // Read in smaller chunks to simulate streaming consumption + var downloadedBytes = await ReadStreamToByteArray(response.ResponseStream, objectSize, (int)(1 * MB)); + var actualChecksum = CalculateChecksum(downloadedBytes); + + Assert.AreEqual(expectedChecksum, actualChecksum, + "Large object should download correctly with limited MaxInMemoryParts"); + Assert.AreEqual(objectSize, downloadedBytes.Length, + "Downloaded size should match for large object with limited MaxInMemoryParts"); + } + } + + #endregion + #region Helper Methods /// diff --git a/sdk/test/Services/S3/UnitTests/Custom/BufferedMultipartStreamTests.cs b/sdk/test/Services/S3/UnitTests/Custom/BufferedMultipartStreamTests.cs index e56a20cedce4..bf0e14a6dda3 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/BufferedMultipartStreamTests.cs +++ b/sdk/test/Services/S3/UnitTests/Custom/BufferedMultipartStreamTests.cs @@ -796,6 +796,121 @@ public async Task FlushAsync_Completes() #endregion + #region MaxInMemoryParts Tests + + [TestMethod] + public void Create_UsesRequestMaxInMemoryParts_NotConfig() + { + // Arrange + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + request.MaxInMemoryParts = 256; // Set custom value on request + + var transferConfig = new TransferUtilityConfig + { + ConcurrentServiceRequests = 20 // TransferUtilityConfig no longer has MaxInMemoryParts + }; + + // Act + var stream = BufferedMultipartStream.Create(mockClient.Object, request, transferConfig); + + // Assert + Assert.IsNotNull(stream); + // Verify the stream was created successfully with request's MaxInMemoryParts + } + + [TestMethod] + public async Task Create_WithCustomMaxInMemoryParts_FlowsToConfiguration() + { + // Arrange + var customMaxParts = 512; + var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse(1024); + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + request.MaxInMemoryParts = customMaxParts; + + var transferConfig = new TransferUtilityConfig(); + var stream = BufferedMultipartStream.Create(mockClient.Object, request, transferConfig); + + await stream.InitializeAsync(CancellationToken.None); + + // Act - Read from stream to verify it works with custom MaxInMemoryParts + var buffer = new byte[512]; + var bytesRead = await stream.ReadAsync(buffer, 0, buffer.Length); + + // Assert + Assert.IsTrue(bytesRead > 0, "Should successfully read with custom MaxInMemoryParts"); + + // Cleanup + stream.Dispose(); + } + + [TestMethod] + public async Task Create_WithDefaultMaxInMemoryParts_UsesRequestDefault() + { + // Arrange - Don't set MaxInMemoryParts explicitly, should use request's default (1024) + var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse(1024); + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + // Don't set request.MaxInMemoryParts - should default to 1024 + + var transferConfig = new TransferUtilityConfig(); + var stream = BufferedMultipartStream.Create(mockClient.Object, request, transferConfig); + + await stream.InitializeAsync(CancellationToken.None); + + // Act + var buffer = new byte[512]; + var bytesRead = 
await stream.ReadAsync(buffer, 0, buffer.Length); + + // Assert + Assert.IsTrue(bytesRead > 0, "Should work with default MaxInMemoryParts from request"); + + // Cleanup + stream.Dispose(); + } + + [DataTestMethod] + [DataRow(1, DisplayName = "Minimum MaxInMemoryParts (1)")] + [DataRow(10, DisplayName = "Small MaxInMemoryParts (10)")] + [DataRow(512, DisplayName = "Medium MaxInMemoryParts (512)")] + [DataRow(2048, DisplayName = "Large MaxInMemoryParts (2048)")] + public async Task Create_WithVariousMaxInMemoryParts_WorksCorrectly(int maxInMemoryParts) + { + // Arrange + var totalParts = 5; + var partSize = 8 * 1024 * 1024; + var totalObjectSize = totalParts * partSize; + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( + totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + request.MaxInMemoryParts = maxInMemoryParts; + + var transferConfig = new TransferUtilityConfig { ConcurrentServiceRequests = 2 }; + var stream = BufferedMultipartStream.Create(mockClient.Object, request, transferConfig); + + await stream.InitializeAsync(CancellationToken.None); + + // Act - Read some data + var buffer = new byte[1024]; + var bytesRead = await stream.ReadAsync(buffer, 0, buffer.Length); + + // Assert + Assert.IsTrue(bytesRead > 0, + $"Should successfully process download with MaxInMemoryParts={maxInMemoryParts}"); + + // Cleanup + stream.Dispose(); + } + + #endregion + #region Synchronous Read Tests [TestMethod] diff --git a/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadTestHelpers.cs b/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadTestHelpers.cs index 8dd37d092de0..c6ceb400d2c1 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadTestHelpers.cs +++ b/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadTestHelpers.cs @@ -345,7 +345,8 @@ public static TransferUtilityOpenStreamRequest CreateOpenStreamRequest( string bucketName = "test-bucket", string key = "test-key", long? partSize = null, - MultipartDownloadType downloadType = MultipartDownloadType.PART) + MultipartDownloadType downloadType = MultipartDownloadType.PART, + int? 
maxInMemoryParts = null) { var request = new TransferUtilityOpenStreamRequest { @@ -359,6 +360,11 @@ public static TransferUtilityOpenStreamRequest CreateOpenStreamRequest( request.PartSize = partSize.Value; } + if (maxInMemoryParts.HasValue) + { + request.MaxInMemoryParts = maxInMemoryParts.Value; + } + return request; } diff --git a/sdk/test/Services/S3/UnitTests/Custom/OpenStreamWithResponseCommandTests.cs b/sdk/test/Services/S3/UnitTests/Custom/OpenStreamWithResponseCommandTests.cs index ab98d371bc95..40156c316ac7 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/OpenStreamWithResponseCommandTests.cs +++ b/sdk/test/Services/S3/UnitTests/Custom/OpenStreamWithResponseCommandTests.cs @@ -383,5 +383,142 @@ public async Task ExecuteAsync_EndToEnd_Multipart() } #endregion + + #region MaxInMemoryParts Tests + + [TestMethod] + public async Task ExecuteAsync_UsesRequestMaxInMemoryParts() + { + // Arrange + var customMaxParts = 256; + var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse(1024, "test-etag"); + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + request.MaxInMemoryParts = customMaxParts; + + var config = new TransferUtilityConfig(); + var command = new OpenStreamWithResponseCommand(mockClient.Object, request, config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.IsNotNull(response.ResponseStream); + // Stream should be created successfully with request's MaxInMemoryParts value + + // Cleanup + response.ResponseStream.Dispose(); + } + + [TestMethod] + public async Task ExecuteAsync_WithDefaultMaxInMemoryParts_WorksCorrectly() + { + // Arrange - Use default MaxInMemoryParts from request (1024) + var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse(2048, "test-etag"); + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + // Don't explicitly set MaxInMemoryParts - should use default of 1024 + + var config = new TransferUtilityConfig(); + var command = new OpenStreamWithResponseCommand(mockClient.Object, request, config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.IsNotNull(response.ResponseStream); + + // Should work with default value + var buffer = new byte[1024]; + var bytesRead = await response.ResponseStream.ReadAsync(buffer, 0, buffer.Length); + Assert.IsTrue(bytesRead > 0, "Should successfully read with default MaxInMemoryParts"); + + // Cleanup + response.ResponseStream.Dispose(); + } + + [DataTestMethod] + [DataRow(1, DisplayName = "Minimum (1 part)")] + [DataRow(128, DisplayName = "Small (128 parts)")] + [DataRow(1024, DisplayName = "Default (1024 parts)")] + [DataRow(2048, DisplayName = "Large (2048 parts)")] + public async Task ExecuteAsync_WithVariousMaxInMemoryParts_CreatesStreamSuccessfully( + int maxInMemoryParts) + { + // Arrange + var totalParts = 3; + var partSize = 8 * 1024 * 1024; + var totalObjectSize = totalParts * partSize; + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( + totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + 
request.MaxInMemoryParts = maxInMemoryParts; + + var config = new TransferUtilityConfig { ConcurrentServiceRequests = 1 }; + var command = new OpenStreamWithResponseCommand(mockClient.Object, request, config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.IsNotNull(response.ResponseStream); + Assert.IsInstanceOfType(response.ResponseStream, typeof(BufferedMultipartStream)); + + // Verify stream works + var stream = (BufferedMultipartStream)response.ResponseStream; + Assert.IsNotNull(stream.DiscoveryResult); + + // Cleanup + response.ResponseStream.Dispose(); + } + + [TestMethod] + public async Task ExecuteAsync_MultipartWithCustomMaxInMemoryParts_IntegrationTest() + { + // Arrange - Larger multipart download with custom memory limit + var customMaxParts = 64; // Lower memory limit for this test + var totalParts = 10; + var partSize = 5 * 1024 * 1024; + var totalObjectSize = totalParts * partSize; + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( + totalParts, partSize, totalObjectSize, "multipart-etag", usePartStrategy: true); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(partSize: partSize); + request.MaxInMemoryParts = customMaxParts; + + var config = new TransferUtilityConfig { ConcurrentServiceRequests = 3 }; + var command = new OpenStreamWithResponseCommand(mockClient.Object, request, config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.IsNotNull(response.ResponseStream); + + var stream = (BufferedMultipartStream)response.ResponseStream; + Assert.AreEqual(totalParts, stream.DiscoveryResult.TotalParts); + + // Verify we can read from the stream with custom MaxInMemoryParts + var buffer = new byte[1024 * 1024]; // 1MB buffer + var bytesRead = await stream.ReadAsync(buffer, 0, buffer.Length); + Assert.IsTrue(bytesRead > 0, + $"Should successfully read multipart download with MaxInMemoryParts={customMaxParts}"); + + // Cleanup + response.ResponseStream.Dispose(); + } + + #endregion } } From d10e67b9dfa9de434db26003fe1936630c74b929 Mon Sep 17 00:00:00 2001 From: Garrett Beatty Date: Mon, 1 Dec 2025 17:47:39 -0500 Subject: [PATCH 34/56] Add error message for multipart download when using Encryption Client (#4171) --- .../Internal/MultipartDownloadManager.cs | 9 +++++ .../Custom/MultipartDownloadManagerTests.cs | 39 +++++++++++++++++++ 2 files changed, 48 insertions(+) diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs index 273e8c905092..b4e395ccf019 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs @@ -99,6 +99,7 @@ private Logger Logger /// /// /// + /// Thrown when using S3 encryption client, which does not support multipart downloads. public MultipartDownloadManager(IAmazonS3 s3Client, BaseDownloadRequest request, DownloadManagerConfiguration config, IPartDataHandler dataHandler, RequestEventHandler requestEventHandler = null) : this(s3Client, request, config, dataHandler, requestEventHandler, null) { @@ -137,12 +138,20 @@ public MultipartDownloadManager(IAmazonS3 s3Client, BaseDownloadRequest request, /// /// /// + /// Thrown when using S3 encryption client, which does not support multipart downloads. 
public MultipartDownloadManager(IAmazonS3 s3Client, BaseDownloadRequest request, DownloadManagerConfiguration config, IPartDataHandler dataHandler, RequestEventHandler requestEventHandler, SemaphoreSlim sharedHttpThrottler) { _s3Client = s3Client ?? throw new ArgumentNullException(nameof(s3Client)); _request = request ?? throw new ArgumentNullException(nameof(request)); _config = config ?? throw new ArgumentNullException(nameof(config)); _dataHandler = dataHandler ?? throw new ArgumentNullException(nameof(dataHandler)); + + // Validate that S3 encryption client is not being used for multipart downloads + if (_s3Client is Amazon.S3.Internal.IAmazonS3Encryption) + { + throw new NotSupportedException("Multipart download is not supported when using Amazon.S3.Internal.IAmazonS3Encryption client. Please use the Amazon.S3.AmazonS3Client for multipart download."); + } + _requestEventHandler = requestEventHandler; // Use shared throttler if provided, otherwise create our own diff --git a/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs b/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs index 49bd3b794adc..3f068d85af40 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs +++ b/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs @@ -151,6 +151,45 @@ public void Constructor_WithNullParameter_ThrowsArgumentNullException( var coordinator = new MultipartDownloadManager(client, request, config, handler); } + [TestMethod] + [ExpectedException(typeof(NotSupportedException))] + public void Constructor_WithEncryptionClient_ThrowsNotSupportedException() + { + // Arrange + var mockEncryptionClient = new Mock<IAmazonS3>(); + mockEncryptionClient.As<Amazon.S3.Internal.IAmazonS3Encryption>(); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var mockDataHandler = CreateMockDataHandler(); + + // Act + var coordinator = new MultipartDownloadManager(mockEncryptionClient.Object, request, config, mockDataHandler.Object); + } + + [TestMethod] + public void Constructor_WithEncryptionClient_ExceptionMessageIsDescriptive() + { + // Arrange + var mockEncryptionClient = new Mock<IAmazonS3>(); + mockEncryptionClient.As<Amazon.S3.Internal.IAmazonS3Encryption>(); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var mockDataHandler = CreateMockDataHandler(); + + // Act & Assert + try + { + var coordinator = new MultipartDownloadManager(mockEncryptionClient.Object, request, config, mockDataHandler.Object); + Assert.Fail("Expected NotSupportedException was not thrown"); + } + catch (NotSupportedException ex) + { + Assert.IsTrue(ex.Message.Contains("Multipart download is not supported when using Amazon.S3.Internal.IAmazonS3Encryption client.
Please use the Amazon.S3.AmazonS3Client for multipart download.")); + } + } + #endregion #region Property Tests From c2109a61b6fd307ce02c7186d067881fc1399359 Mon Sep 17 00:00:00 2001 From: Philippe El Asmar <53088140+philasmar@users.noreply.github.com> Date: Wed, 3 Dec 2025 11:01:17 -0500 Subject: [PATCH 35/56] add failure policy to upload directory (#4181) --- .../c49077d9-90b3-437f-b316-6d8d8833ae77.json | 12 + .../Generators/SourceFiles/AssemblyInfo.cs | 58 ++-- .../Generators/SourceFiles/AssemblyInfo.tt | 4 + .../Transfer/{Model => }/DirectoryResult.cs | 2 +- .../Transfer/{Model => }/FailurePolicy.cs | 2 +- .../Internal/DownloadDirectoryCommand.cs | 1 - .../Internal/UploadDirectoryCommand.cs | 8 + .../Internal/_async/BaseCommand.async.cs | 25 -- .../DownloadDirectoryCommand.cs | 1 - .../UploadDirectoryCommand.cs | 74 ++++- ...TransferUtilityDownloadDirectoryRequest.cs | 1 - ...ransferUtilityDownloadDirectoryResponse.cs | 1 - .../TransferUtilityUploadDirectoryRequest.cs | 107 +++++++ .../TransferUtilityUploadDirectoryResponse.cs | 23 +- .../Services/S3/Properties/AssemblyInfo.cs | 4 + ...tegrationTestUtilities.NetFramework.csproj | 14 + ...DK.IntegrationTests.S3.NetFramework.csproj | 94 +++--- .../IntegrationTests/TransferUtilityTests.cs | 112 +++++++ .../S3/UnitTests/Custom/FailurePolicyTests.cs | 290 +++++++++++++++++- .../Custom/UploadDirectoryCommandTests.cs | 227 ++++++++++++++ 20 files changed, 958 insertions(+), 102 deletions(-) create mode 100644 generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae77.json rename sdk/src/Services/S3/Custom/Transfer/{Model => }/DirectoryResult.cs (97%) rename sdk/src/Services/S3/Custom/Transfer/{Model => }/FailurePolicy.cs (97%) create mode 100644 sdk/test/Services/S3/UnitTests/Custom/UploadDirectoryCommandTests.cs diff --git a/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae77.json b/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae77.json new file mode 100644 index 000000000000..e75662d5e8d0 --- /dev/null +++ b/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae77.json @@ -0,0 +1,12 @@ +{ + "services": [ + { + "serviceName": "S3", + "type": "minor", + "changeLogMessages": [ + "Add FailurePolicy property to TransferUtilityUploadDirectoryRequest to allow configuration of failure handling behavior during directory uploads. The default behavior is set to abort on failure. Users can now choose to either abort the entire operation or continue uploading remaining files when a failure occurs.", + "Add ObjectUploadFailedEvent event to TransferUtilityUploadDirectoryRequest to notify users when an individual file upload fails during a directory upload operation. This event provides details about the failed upload, including the original request, the specific file request and the exception encountered." + ] + } + ] +} \ No newline at end of file diff --git a/generator/ServiceClientGeneratorLib/Generators/SourceFiles/AssemblyInfo.cs b/generator/ServiceClientGeneratorLib/Generators/SourceFiles/AssemblyInfo.cs index 8eec59e0554a..5f1a040525fd 100644 --- a/generator/ServiceClientGeneratorLib/Generators/SourceFiles/AssemblyInfo.cs +++ b/generator/ServiceClientGeneratorLib/Generators/SourceFiles/AssemblyInfo.cs @@ -1,7 +1,7 @@ // ------------------------------------------------------------------------------ // // This code was generated by a tool. -// Runtime Version: 17.0.0.0 +// Runtime Version: 18.0.0.0 // // Changes to this file may cause incorrect behavior and will be lost if // the code is regenerated. 
@@ -15,8 +15,8 @@ namespace ServiceClientGenerator.Generators.SourceFiles /// Class to produce the template output /// - #line 1 "C:\dev\repos\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" - [global::System.CodeDom.Compiler.GeneratedCodeAttribute("Microsoft.VisualStudio.TextTemplating", "17.0.0.0")] + #line 1 "D:\CodeBase\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" + [global::System.CodeDom.Compiler.GeneratedCodeAttribute("Microsoft.VisualStudio.TextTemplating", "18.0.0.0")] public partial class AssemblyInfo : BaseGenerator { #line hidden @@ -36,35 +36,35 @@ public override string TransformText() // associated with an assembly. [assembly: AssemblyTitle("""); - #line 12 "C:\dev\repos\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" + #line 12 "D:\CodeBase\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" this.Write(this.ToStringHelper.ToStringWithCulture(this.Config.AssemblyTitle)); #line default #line hidden this.Write("\")]\r\n#if BCL\r\n[assembly: AssemblyDescription(\""); - #line 14 "C:\dev\repos\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" + #line 14 "D:\CodeBase\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" this.Write(this.ToStringHelper.ToStringWithCulture(this.Config.AssemblyDescription(versionIdentifier: "4.7.2"))); #line default #line hidden this.Write("\")]\r\n#elif NETSTANDARD20\r\n[assembly: AssemblyDescription(\""); - #line 16 "C:\dev\repos\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" + #line 16 "D:\CodeBase\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" this.Write(this.ToStringHelper.ToStringWithCulture(this.Config.AssemblyDescription(versionIdentifier: "NetStandard 2.0"))); #line default #line hidden this.Write("\")]\r\n#elif NETCOREAPP3_1\r\n[assembly: AssemblyDescription(\""); - #line 18 "C:\dev\repos\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" + #line 18 "D:\CodeBase\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" this.Write(this.ToStringHelper.ToStringWithCulture(this.Config.AssemblyDescription(versionIdentifier: ".NET Core 3.1"))); #line default #line hidden this.Write("\")]\r\n#elif NET8_0\r\n[assembly: AssemblyDescription(\""); - #line 20 "C:\dev\repos\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" + #line 20 "D:\CodeBase\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" this.Write(this.ToStringHelper.ToStringWithCulture(this.Config.AssemblyDescription(versionIdentifier: ".NET 8.0"))); #line default @@ -72,7 +72,7 @@ public override string TransformText() this.Write("\")]\r\n#else\r\n#error Unknown platform constant - unable to set correct AssemblyDesc" + "ription\r\n#endif\r\n\r\n"); - #line 25 "C:\dev\repos\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" + #line 25 "D:\CodeBase\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" if (this.Config.AssemblyTitle=="AWSSDK.DynamoDBv2") { #line default @@ -81,23 +81,43 @@ public override string TransformText() [assembly: InternalsVisibleTo(""AWSSDK.UnitTests.NetFramework, 
PublicKey=0024000004800000940000000602000000240000525341310004000001000100db5f59f098d27276c7833875a6263a3cc74ab17ba9a9df0b52aedbe7252745db7274d5271fd79c1f08f668ecfa8eaab5626fa76adc811d3c8fc55859b0d09d3bc0a84eecd0ba891f2b8a2fc55141cdcc37c2053d53491e650a479967c3622762977900eddbf1252ed08a2413f00a28f3a0752a81203f03ccb7f684db373518b4"")] "); - #line 28 "C:\dev\repos\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" + #line 28 "D:\CodeBase\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" } #line default #line hidden - #line 29 "C:\dev\repos\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" + #line 29 "D:\CodeBase\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" if (this.Config.AssemblyTitle=="AWSSDK.S3") { #line default #line hidden - this.Write(@"[assembly: InternalsVisibleTo(""AWSSDK.UnitTests.S3.NetFramework, PublicKey=0024000004800000940000000602000000240000525341310004000001000100db5f59f098d27276c7833875a6263a3cc74ab17ba9a9df0b52aedbe7252745db7274d5271fd79c1f08f668ecfa8eaab5626fa76adc811d3c8fc55859b0d09d3bc0a84eecd0ba891f2b8a2fc55141cdcc37c2053d53491e650a479967c3622762977900eddbf1252ed08a2413f00a28f3a0752a81203f03ccb7f684db373518b4"")] -[assembly: InternalsVisibleTo(""AWSSDK.UnitTests.NetFramework, PublicKey=0024000004800000940000000602000000240000525341310004000001000100db5f59f098d27276c7833875a6263a3cc74ab17ba9a9df0b52aedbe7252745db7274d5271fd79c1f08f668ecfa8eaab5626fa76adc811d3c8fc55859b0d09d3bc0a84eecd0ba891f2b8a2fc55141cdcc37c2053d53491e650a479967c3622762977900eddbf1252ed08a2413f00a28f3a0752a81203f03ccb7f684db373518b4"")] -[assembly: InternalsVisibleTo(""DynamicProxyGenAssembly2, PublicKey=0024000004800000940000000602000000240000525341310004000001000100c547cac37abd99c8db225ef2f6c8a3602f3b3606cc9891605d02baa56104f4cfc0734aa39b93bf7852f7d9266654753cc297e7d2edfe0bac1cdcf9f717241550e0a7b191195b7667bb4f64bcb8e2121380fd1d9d46ad2d92d2d15605093924cceaf74c4861eff62abf69b9291ed0a340e113be11e6a7d3113e92484cf7045cc7"")] -"); - - #line 33 "C:\dev\repos\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" + this.Write("[assembly: InternalsVisibleTo(\"AWSSDK.UnitTests.S3.NetFramework, PublicKey=002400" + + "0004800000940000000602000000240000525341310004000001000100db5f59f098d27276c78338" + + "75a6263a3cc74ab17ba9a9df0b52aedbe7252745db7274d5271fd79c1f08f668ecfa8eaab5626fa7" + + "6adc811d3c8fc55859b0d09d3bc0a84eecd0ba891f2b8a2fc55141cdcc37c2053d53491e650a4799" + + "67c3622762977900eddbf1252ed08a2413f00a28f3a0752a81203f03ccb7f684db373518b4\")]\r\n[" + + "assembly: InternalsVisibleTo(\"AWSSDK.UnitTests.NetFramework, PublicKey=002400000" + + "4800000940000000602000000240000525341310004000001000100db5f59f098d27276c7833875a" + + "6263a3cc74ab17ba9a9df0b52aedbe7252745db7274d5271fd79c1f08f668ecfa8eaab5626fa76ad" + + "c811d3c8fc55859b0d09d3bc0a84eecd0ba891f2b8a2fc55141cdcc37c2053d53491e650a479967c" + + "3622762977900eddbf1252ed08a2413f00a28f3a0752a81203f03ccb7f684db373518b4\")]\r\n\r\n//" + + " We should remove this in the future when TransferUtility Upload/Download direct" + + "ory methods return responses.\r\n// We should update the Integration Tests in Tran" + + "sferUtilityTests.cs to not use the internal methods and instead use the new publ" + + "ic ones that return responses.\r\n[assembly: InternalsVisibleTo(\"AWSSDK.Integratio" + + "nTests.S3.NetFramework, PublicKey=0024000004800000940000000602000000240000525341" + + 
"310004000001000100db5f59f098d27276c7833875a6263a3cc74ab17ba9a9df0b52aedbe7252745" + + "db7274d5271fd79c1f08f668ecfa8eaab5626fa76adc811d3c8fc55859b0d09d3bc0a84eecd0ba89" + + "1f2b8a2fc55141cdcc37c2053d53491e650a479967c3622762977900eddbf1252ed08a2413f00a28" + + "f3a0752a81203f03ccb7f684db373518b4\")]\r\n[assembly: InternalsVisibleTo(\"DynamicPro" + + "xyGenAssembly2, PublicKey=002400000480000094000000060200000024000052534131000400" + + "0001000100c547cac37abd99c8db225ef2f6c8a3602f3b3606cc9891605d02baa56104f4cfc0734a" + + "a39b93bf7852f7d9266654753cc297e7d2edfe0bac1cdcf9f717241550e0a7b191195b7667bb4f64" + + "bcb8e2121380fd1d9d46ad2d92d2d15605093924cceaf74c4861eff62abf69b9291ed0a340e113be" + + "11e6a7d3113e92484cf7045cc7\")]\r\n"); + + #line 37 "D:\CodeBase\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" } #line default @@ -126,14 +146,14 @@ public override string TransformText() // [assembly: AssemblyVersion(""1.0.*"")] [assembly: AssemblyVersion("""); - #line 56 "C:\dev\repos\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" + #line 60 "D:\CodeBase\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" this.Write(this.ToStringHelper.ToStringWithCulture(this.Config.ServiceVersion)); #line default #line hidden this.Write("\")]\r\n[assembly: AssemblyFileVersion(\""); - #line 57 "C:\dev\repos\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" + #line 61 "D:\CodeBase\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" this.Write(this.ToStringHelper.ToStringWithCulture(this.Config.ServiceFileVersion)); #line default diff --git a/generator/ServiceClientGeneratorLib/Generators/SourceFiles/AssemblyInfo.tt b/generator/ServiceClientGeneratorLib/Generators/SourceFiles/AssemblyInfo.tt index 31dceb950beb..fbed276512ca 100644 --- a/generator/ServiceClientGeneratorLib/Generators/SourceFiles/AssemblyInfo.tt +++ b/generator/ServiceClientGeneratorLib/Generators/SourceFiles/AssemblyInfo.tt @@ -29,6 +29,10 @@ using System.Runtime.CompilerServices; <# if (this.Config.AssemblyTitle=="AWSSDK.S3") { #> [assembly: InternalsVisibleTo("AWSSDK.UnitTests.S3.NetFramework, PublicKey=0024000004800000940000000602000000240000525341310004000001000100db5f59f098d27276c7833875a6263a3cc74ab17ba9a9df0b52aedbe7252745db7274d5271fd79c1f08f668ecfa8eaab5626fa76adc811d3c8fc55859b0d09d3bc0a84eecd0ba891f2b8a2fc55141cdcc37c2053d53491e650a479967c3622762977900eddbf1252ed08a2413f00a28f3a0752a81203f03ccb7f684db373518b4")] [assembly: InternalsVisibleTo("AWSSDK.UnitTests.NetFramework, PublicKey=0024000004800000940000000602000000240000525341310004000001000100db5f59f098d27276c7833875a6263a3cc74ab17ba9a9df0b52aedbe7252745db7274d5271fd79c1f08f668ecfa8eaab5626fa76adc811d3c8fc55859b0d09d3bc0a84eecd0ba891f2b8a2fc55141cdcc37c2053d53491e650a479967c3622762977900eddbf1252ed08a2413f00a28f3a0752a81203f03ccb7f684db373518b4")] + +// We should remove this in the future when TransferUtility Upload/Download directory methods return responses. +// We should update the Integration Tests in TransferUtilityTests.cs to not use the internal methods and instead use the new public ones that return responses. 
+[assembly: InternalsVisibleTo("AWSSDK.IntegrationTests.S3.NetFramework, PublicKey=0024000004800000940000000602000000240000525341310004000001000100db5f59f098d27276c7833875a6263a3cc74ab17ba9a9df0b52aedbe7252745db7274d5271fd79c1f08f668ecfa8eaab5626fa76adc811d3c8fc55859b0d09d3bc0a84eecd0ba891f2b8a2fc55141cdcc37c2053d53491e650a479967c3622762977900eddbf1252ed08a2413f00a28f3a0752a81203f03ccb7f684db373518b4")] [assembly: InternalsVisibleTo("DynamicProxyGenAssembly2, PublicKey=0024000004800000940000000602000000240000525341310004000001000100c547cac37abd99c8db225ef2f6c8a3602f3b3606cc9891605d02baa56104f4cfc0734aa39b93bf7852f7d9266654753cc297e7d2edfe0bac1cdcf9f717241550e0a7b191195b7667bb4f64bcb8e2121380fd1d9d46ad2d92d2d15605093924cceaf74c4861eff62abf69b9291ed0a340e113be11e6a7d3113e92484cf7045cc7")] <# } #> [assembly: AssemblyConfiguration("")] diff --git a/sdk/src/Services/S3/Custom/Transfer/Model/DirectoryResult.cs b/sdk/src/Services/S3/Custom/Transfer/DirectoryResult.cs similarity index 97% rename from sdk/src/Services/S3/Custom/Transfer/Model/DirectoryResult.cs rename to sdk/src/Services/S3/Custom/Transfer/DirectoryResult.cs index 3f8cbd84fb2e..5329b21e07f2 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Model/DirectoryResult.cs +++ b/sdk/src/Services/S3/Custom/Transfer/DirectoryResult.cs @@ -20,7 +20,7 @@ * */ -namespace Amazon.S3.Transfer.Model +namespace Amazon.S3.Transfer { /// /// Overall outcome of a directory operation. diff --git a/sdk/src/Services/S3/Custom/Transfer/Model/FailurePolicy.cs b/sdk/src/Services/S3/Custom/Transfer/FailurePolicy.cs similarity index 97% rename from sdk/src/Services/S3/Custom/Transfer/Model/FailurePolicy.cs rename to sdk/src/Services/S3/Custom/Transfer/FailurePolicy.cs index fbb265ca1103..5bf16b176a75 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Model/FailurePolicy.cs +++ b/sdk/src/Services/S3/Custom/Transfer/FailurePolicy.cs @@ -20,7 +20,7 @@ * */ -namespace Amazon.S3.Transfer.Model +namespace Amazon.S3.Transfer { /// /// Specifies the policy to apply when a failure occurs during a directory transfer operation. 
diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadDirectoryCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadDirectoryCommand.cs index d932a282b137..687167ab1dc1 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadDirectoryCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadDirectoryCommand.cs @@ -31,7 +31,6 @@ using Amazon.S3.Util; using Amazon.Util.Internal; using Amazon.Runtime; -using Amazon.S3.Transfer.Model; namespace Amazon.S3.Transfer.Internal { diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/UploadDirectoryCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/UploadDirectoryCommand.cs index 693a9ef8325a..148e34798d47 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/UploadDirectoryCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/UploadDirectoryCommand.cs @@ -20,6 +20,7 @@ * */ using System; +using System.Collections.Concurrent; using System.Collections.Generic; using System.IO; using System.Text; @@ -34,12 +35,15 @@ namespace Amazon.S3.Transfer.Internal /// internal partial class UploadDirectoryCommand : BaseCommand<TransferUtilityUploadDirectoryResponse> { + private IFailurePolicy _failurePolicy; + private ConcurrentBag<Exception> _errors = new ConcurrentBag<Exception>(); TransferUtilityUploadDirectoryRequest _request; TransferUtility _utility; TransferUtilityConfig _config; int _totalNumberOfFiles; int _numberOfFilesUploaded; + int _numberOfFilesSuccessfullyUploaded; long _totalBytes; long _transferredBytes; @@ -48,6 +52,10 @@ internal UploadDirectoryCommand(TransferUtility utility, TransferUtilityConfig c this._utility = utility; this._request = request; this._config = config; + _failurePolicy = + request.FailurePolicy == FailurePolicy.AbortOnFailure + ? new AbortOnFailurePolicy() + : new ContinueOnFailurePolicy(_errors); } internal TransferUtilityUploadRequest ConstructRequest(string basePath, string filepath, string prefix) diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/BaseCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/BaseCommand.async.cs index a687917f7d9f..a7a58a4b02c5 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/BaseCommand.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/BaseCommand.async.cs @@ -13,12 +13,6 @@ * permissions and limitations under the License. */ -using Amazon.S3.Model; -using System; -using System.Collections.Generic; -using System.IO; -using System.Linq; -using System.Text; using System.Threading; using System.Threading.Tasks; @@ -30,24 +24,5 @@ internal abstract partial class BaseCommand<TResponse> where TResponse : class /// Executes the command and returns a typed response /// public abstract Task<TResponse> ExecuteAsync(CancellationToken cancellationToken); - - protected static async Task ExecuteCommandAsync<T>(BaseCommand<T> command, CancellationTokenSource internalCts) where T : class - { - try - { - await command.ExecuteAsync(internalCts.Token) - .ConfigureAwait(continueOnCapturedContext: false); - } - catch (Exception exception) - { - if (!(exception is OperationCanceledException)) - { - // Cancel scheduling any more tasks. - // Cancel other upload requests.
- internalCts.Cancel(); - } - throw; - } - } } } diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs index 11c210a95cd9..7c19d78ed2c3 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs @@ -15,7 +15,6 @@ using Amazon.S3.Model; using Amazon.S3.Util; -using Amazon.S3.Transfer.Model; using System; using System.Collections.Concurrent; using System.Collections.Generic; diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/UploadDirectoryCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/UploadDirectoryCommand.cs index 07c71c27363f..e56c811fbb36 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/UploadDirectoryCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/UploadDirectoryCommand.cs @@ -20,23 +20,29 @@ using System.Text; using System.Threading; using System.Threading.Tasks; +using Amazon.Runtime.Internal.Util; namespace Amazon.S3.Transfer.Internal { internal partial class UploadDirectoryCommand : BaseCommand { public bool UploadFilesConcurrently { get; set; } + private readonly Logger _logger = Logger.GetLogger(typeof(UploadDirectoryCommand)); public override async Task ExecuteAsync(CancellationToken cancellationToken) { string prefix = GetKeyPrefix(); - string basePath = new DirectoryInfo(this._request.Directory).FullName; + _logger.DebugFormat("UploadDirectoryCommand starting. BasePath={0}, Prefix={1}, UploadFilesConcurrently={2}, ConcurrentServiceRequests={3}", + basePath, prefix, UploadFilesConcurrently, this._config.ConcurrentServiceRequests); + string[] filePaths = await GetFiles(basePath, this._request.SearchPattern, this._request.SearchOption, cancellationToken) - .ConfigureAwait(continueOnCapturedContext: false); + .ConfigureAwait(continueOnCapturedContext: false); this._totalNumberOfFiles = filePaths.Length; + _logger.DebugFormat("Discovered {0} file(s) to upload. TotalBytes={1}", _totalNumberOfFiles, _totalBytes); + // Two-level throttling architecture: // 1. File-level throttler: Controls how many files are uploaded concurrently // 2. HTTP-level throttler: Controls total HTTP requests across ALL file uploads @@ -54,11 +60,12 @@ public override async Task ExecuteAsync( try { var pendingTasks = new List(); - + // File-level throttler: Controls concurrent file operations - fileOperationThrottler = UploadFilesConcurrently ? + fileOperationThrottler = UploadFilesConcurrently ? new SemaphoreSlim(this._config.ConcurrentServiceRequests) : new SemaphoreSlim(1); + _logger.DebugFormat("Created fileOperationThrottler with initial count={0}", UploadFilesConcurrently ? this._config.ConcurrentServiceRequests : 1); // HTTP-level throttler: Shared across all uploads to control total HTTP concurrency sharedHttpRequestThrottler = this._utility.S3Client is Amazon.S3.Internal.IAmazonS3Encryption ? @@ -69,12 +76,22 @@ public override async Task ExecuteAsync( // Use a throttler which will be shared between simple and multipart uploads // to control total concurrent HTTP requests across all file operations. new SemaphoreSlim(this._config.ConcurrentServiceRequests); - + if (sharedHttpRequestThrottler == null) + { + _logger.Debug(null, "sharedHttpRequestThrottler disabled due to encryption client. 
Multipart uploads will be serial per file."); + } + else + { + _logger.DebugFormat("Created sharedHttpRequestThrottler with initial count={0}", this._config.ConcurrentServiceRequests); + } internalCts = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken); + foreach (string filepath in filePaths) { + _logger.DebugFormat("Waiting for fileOperationThrottler to schedule file."); await fileOperationThrottler.WaitAsync(cancellationToken).ConfigureAwait(continueOnCapturedContext: false); + _logger.DebugFormat("Acquired fileOperationThrottler. Currently scheduled: {0}", pendingTasks.Count + 1); try { @@ -85,30 +102,69 @@ public override async Task ExecuteAsync( // don't schedule any more upload tasks. // Don't throw an OperationCanceledException here as we want to process the // responses and throw the original exception. + _logger.Debug(null, "Internal cancellation requested; breaking out of scheduling loop."); break; } + var uploadRequest = ConstructRequest(basePath, filepath, prefix); - var uploadCommand = _utility.GetUploadCommand(uploadRequest, sharedHttpRequestThrottler); - var task = ExecuteCommandAsync(uploadCommand, internalCts); + Action onFailure = (ex) => + { + this._request.OnRaiseObjectUploadFailedEvent( + new ObjectUploadFailedEventArgs( + this._request, + uploadRequest, + ex)); + }; + + var task = _failurePolicy.ExecuteAsync( + async () => { + _logger.DebugFormat("Starting upload command"); + var command = _utility.GetUploadCommand(uploadRequest, sharedHttpRequestThrottler); + await command.ExecuteAsync(internalCts.Token) + .ConfigureAwait(false); + var uploaded = Interlocked.Increment(ref _numberOfFilesSuccessfullyUploaded); + _logger.DebugFormat("Completed upload. FilesSuccessfullyUploaded={0}", uploaded); + }, + onFailure, + internalCts + ); + pendingTasks.Add(task); + _logger.DebugFormat("Scheduled upload task. PendingTasks=01}", pendingTasks.Count); } finally { fileOperationThrottler.Release(); } } + + _logger.DebugFormat("Awaiting completion of {0} scheduled task(s)", pendingTasks.Count); await TaskHelpers.WhenAllOrFirstExceptionAsync(pendingTasks, cancellationToken) .ConfigureAwait(continueOnCapturedContext: false); } finally - { + { internalCts.Dispose(); fileOperationThrottler.Dispose(); sharedHttpRequestThrottler?.Dispose(); + _logger.DebugFormat("UploadDirectoryCommand finished. FilesSuccessfullyUploaded={0}", _numberOfFilesSuccessfullyUploaded); } - return new TransferUtilityUploadDirectoryResponse(); + var response = new TransferUtilityUploadDirectoryResponse + { + ObjectsUploaded = _numberOfFilesSuccessfullyUploaded, + ObjectsFailed = _errors.Count, + Errors = _errors.ToList(), + Result = _errors.Count == 0 ? + DirectoryResult.Success : + (_numberOfFilesSuccessfullyUploaded > 0 ? 
+ DirectoryResult.PartialSuccess : + DirectoryResult.Failure) + }; + + _logger.DebugFormat("Response summary: Uploaded={0}, Failed={1}, Result={2}", response.ObjectsUploaded, response.ObjectsFailed, response.Result); + return response; } private Task<string[]> GetFiles(string path, string searchPattern, SearchOption searchOption, CancellationToken cancellationToken) diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryRequest.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryRequest.cs index 12dffa4b2b86..9931d29c1e8c 100644 --- a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryRequest.cs +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryRequest.cs @@ -30,7 +30,6 @@ using Amazon.Runtime.Internal; using System.Globalization; using System.Threading; -using Amazon.S3.Transfer.Model; namespace Amazon.S3.Transfer diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryResponse.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryResponse.cs index 1bed1f94ffb2..63533406b4d2 100644 --- a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryResponse.cs +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryResponse.cs @@ -15,7 +15,6 @@ using System; using System.Collections.Generic; -using Amazon.S3.Transfer.Model; namespace Amazon.S3.Transfer { diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadDirectoryRequest.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadDirectoryRequest.cs index cf7be9f65437..004e83d1f81d 100644 --- a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadDirectoryRequest.cs +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadDirectoryRequest.cs @@ -42,6 +42,44 @@ public class TransferUtilityUploadDirectoryRequest : BaseUploadRequest string _keyPrefix; private bool _uploadFilesConcurrently = false; SearchOption _searchOption = SearchOption.TopDirectoryOnly; + private FailurePolicy failurePolicy = FailurePolicy.AbortOnFailure; + + /// + /// Gets or sets the failure policy for the upload directory operation. + /// Determines whether the operation should abort or continue when a failure occurs during upload. + /// The default value is <see cref="FailurePolicy.AbortOnFailure"/>. + /// + public FailurePolicy FailurePolicy + { + get { return this.failurePolicy; } + set { this.failurePolicy = value; } + } + + /// + /// Occurs when an individual object fails to upload during an UploadDirectory operation. + /// + /// + /// Subscribers will receive a <see cref="ObjectUploadFailedEventArgs"/> instance containing + /// the original <see cref="TransferUtilityUploadDirectoryRequest"/>, the failed + /// <see cref="TransferUtilityUploadRequest"/>, and the exception that caused the failure. + /// This event is raised on a background thread by the transfer utility. + /// + /// + /// request.ObjectUploadFailedEvent += (sender, args) => + /// { + /// // inspect args.DirectoryRequest, args.ObjectRequest, args.Exception + /// }; + /// + public event EventHandler<ObjectUploadFailedEventArgs> ObjectUploadFailedEvent; + + /// + /// Internal helper used by the transfer implementation to raise the <see cref="ObjectUploadFailedEvent"/>. + /// + /// The details of the failed object upload. + internal void OnRaiseObjectUploadFailedEvent(ObjectUploadFailedEventArgs args) + { + ObjectUploadFailedEvent?.Invoke(this, args); + } /// /// Gets or sets the directory where files are uploaded from.
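The command above routes each per-file upload through AbortOnFailurePolicy or ContinueOnFailurePolicy, whose implementations this patch does not show. Below is a minimal sketch of the contract they would need to satisfy, inferred from the call site in UploadDirectoryCommand; the IFailurePolicy name comes from the patch, but the method shape and bodies here are assumptions, not the SDK's actual internals.

using System;
using System.Collections.Concurrent;
using System.Threading;
using System.Threading.Tasks;

internal interface IFailurePolicy
{
    // upload: runs a single file upload; onFailure: raises ObjectUploadFailedEvent;
    // internalCts: linked token source shared by all scheduled uploads.
    Task ExecuteAsync(Func<Task> upload, Action<Exception> onFailure, CancellationTokenSource internalCts);
}

internal sealed class AbortOnFailurePolicy : IFailurePolicy
{
    public async Task ExecuteAsync(Func<Task> upload, Action<Exception> onFailure, CancellationTokenSource internalCts)
    {
        try
        {
            await upload().ConfigureAwait(false);
        }
        catch (Exception ex)
        {
            onFailure(ex);
            if (!(ex is OperationCanceledException))
                internalCts.Cancel(); // stop scheduling the remaining files
            throw;                    // first failure surfaces to the caller
        }
    }
}

internal sealed class ContinueOnFailurePolicy : IFailurePolicy
{
    private readonly ConcurrentBag<Exception> _errors;

    public ContinueOnFailurePolicy(ConcurrentBag<Exception> errors)
    {
        _errors = errors;
    }

    public async Task ExecuteAsync(Func<Task> upload, Action<Exception> onFailure, CancellationTokenSource internalCts)
    {
        try
        {
            await upload().ConfigureAwait(false);
        }
        catch (Exception ex)
        {
            _errors.Add(ex); // surfaced later via TransferUtilityUploadDirectoryResponse.Errors
            onFailure(ex);   // notify subscribers; the remaining uploads continue
        }
    }
}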
@@ -382,4 +420,73 @@ public UploadDirectoryFileRequestArgs(TransferUtilityUploadRequest request) /// public TransferUtilityUploadRequest UploadRequest { get; set; } } + + /// + /// Provides data for <see cref="TransferUtilityUploadDirectoryRequest.ObjectUploadFailedEvent"/>, + /// which is raised when an individual object fails to upload during an + /// UploadDirectory operation. + /// + /// + /// Instances of this class are created by the transfer implementation and + /// passed to event subscribers. The instance contains the original directory + /// upload request (<see cref="TransferUtilityUploadDirectoryRequest"/>), + /// the per-object upload request that failed (<see cref="TransferUtilityUploadRequest"/>), + /// and the exception that caused the failure. + /// + /// + /// + /// var request = new TransferUtilityUploadDirectoryRequest { /* ... */ }; + /// request.ObjectUploadFailedEvent += (sender, args) => + /// { + /// // args.DirectoryRequest: original directory request + /// // args.ObjectRequest: upload request for the failed object + /// // args.Exception: exception thrown during the object upload + /// Console.WriteLine($"Failed to upload {args.ObjectRequest.Key}: {args.Exception}"); + /// }; + /// + /// + public class ObjectUploadFailedEventArgs : EventArgs + { + /// + /// Initializes a new instance of the <see cref="ObjectUploadFailedEventArgs"/> class. + /// + /// The original <see cref="TransferUtilityUploadDirectoryRequest"/> that initiated the directory upload. + /// The <see cref="TransferUtilityUploadRequest"/> representing the individual object upload that failed. + /// The <see cref="Exception"/> that caused the object upload to fail. + internal ObjectUploadFailedEventArgs( + TransferUtilityUploadDirectoryRequest directoryRequest, + TransferUtilityUploadRequest objectRequest, + Exception exception) + { + DirectoryRequest = directoryRequest; + ObjectRequest = objectRequest; + Exception = exception; + } + + /// + /// Gets the original <see cref="TransferUtilityUploadDirectoryRequest"/> that initiated the directory upload. + /// + /// + /// The directory-level request that configured the overall UploadDirectory operation. + /// + public TransferUtilityUploadDirectoryRequest DirectoryRequest { get; private set; } + + /// + /// Gets the <see cref="TransferUtilityUploadRequest"/> for the individual object that failed to upload. + /// + /// + /// Contains per-object parameters such as the S3 key and version id (if set). + /// + public TransferUtilityUploadRequest ObjectRequest { get; private set; } + + /// + /// Gets the <see cref="Exception"/> that caused the object upload to fail. + /// + /// + /// The exception thrown by the underlying upload operation. Can be an S3 service exception, + /// an I/O exception, or another exception type depending + /// on the failure mode. + /// + public Exception Exception { get; private set; } + } } diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadDirectoryResponse.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadDirectoryResponse.cs index 94f32558d1fb..2c3912207060 100644 --- a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadDirectoryResponse.cs +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadDirectoryResponse.cs @@ -20,7 +20,8 @@ * */ -using Amazon.Runtime; +using System; +using System.Collections.Generic; namespace Amazon.S3.Transfer { @@ -30,6 +31,24 @@ namespace Amazon.S3.Transfer /// public class TransferUtilityUploadDirectoryResponse { - // Empty placeholder class - properties will be added in future iterations + /// + /// The number of objects that have been successfully uploaded. + /// + public long ObjectsUploaded { get; set; } + + /// + /// The number of objects that failed to upload. Zero if all succeeded. + /// + public long ObjectsFailed { get; set; } + + /// + /// The collection of exceptions encountered when uploading individual objects. + /// + public IList<Exception> Errors { get; set; } + + /// + /// Overall result of the directory upload operation.
+ /// + public DirectoryResult Result { get; set; } } } diff --git a/sdk/src/Services/S3/Properties/AssemblyInfo.cs b/sdk/src/Services/S3/Properties/AssemblyInfo.cs index 980e732be31d..9c65f527a8c4 100644 --- a/sdk/src/Services/S3/Properties/AssemblyInfo.cs +++ b/sdk/src/Services/S3/Properties/AssemblyInfo.cs @@ -21,6 +21,10 @@ [assembly: InternalsVisibleTo("AWSSDK.UnitTests.S3.NetFramework, PublicKey=0024000004800000940000000602000000240000525341310004000001000100db5f59f098d27276c7833875a6263a3cc74ab17ba9a9df0b52aedbe7252745db7274d5271fd79c1f08f668ecfa8eaab5626fa76adc811d3c8fc55859b0d09d3bc0a84eecd0ba891f2b8a2fc55141cdcc37c2053d53491e650a479967c3622762977900eddbf1252ed08a2413f00a28f3a0752a81203f03ccb7f684db373518b4")] [assembly: InternalsVisibleTo("AWSSDK.UnitTests.NetFramework, PublicKey=0024000004800000940000000602000000240000525341310004000001000100db5f59f098d27276c7833875a6263a3cc74ab17ba9a9df0b52aedbe7252745db7274d5271fd79c1f08f668ecfa8eaab5626fa76adc811d3c8fc55859b0d09d3bc0a84eecd0ba891f2b8a2fc55141cdcc37c2053d53491e650a479967c3622762977900eddbf1252ed08a2413f00a28f3a0752a81203f03ccb7f684db373518b4")] + +// We should remove this in the future when TransferUtility Upload/Download directory methods return responses. +// We should update the Integration Tests in TransferUtilityTests.cs to not use the internal methods and instead use the new public ones that return responses. +[assembly: InternalsVisibleTo("AWSSDK.IntegrationTests.S3.NetFramework, PublicKey=0024000004800000940000000602000000240000525341310004000001000100db5f59f098d27276c7833875a6263a3cc74ab17ba9a9df0b52aedbe7252745db7274d5271fd79c1f08f668ecfa8eaab5626fa76adc811d3c8fc55859b0d09d3bc0a84eecd0ba891f2b8a2fc55141cdcc37c2053d53491e650a479967c3622762977900eddbf1252ed08a2413f00a28f3a0752a81203f03ccb7f684db373518b4")] [assembly: InternalsVisibleTo("DynamicProxyGenAssembly2, PublicKey=0024000004800000940000000602000000240000525341310004000001000100c547cac37abd99c8db225ef2f6c8a3602f3b3606cc9891605d02baa56104f4cfc0734aa39b93bf7852f7d9266654753cc297e7d2edfe0bac1cdcf9f717241550e0a7b191195b7667bb4f64bcb8e2121380fd1d9d46ad2d92d2d15605093924cceaf74c4861eff62abf69b9291ed0a340e113be11e6a7d3113e92484cf7045cc7")] [assembly: AssemblyConfiguration("")] [assembly: AssemblyProduct("Amazon Web Services SDK for .NET")] diff --git a/sdk/test/IntegrationTests/AWSSDK.IntegrationTestUtilities.NetFramework.csproj b/sdk/test/IntegrationTests/AWSSDK.IntegrationTestUtilities.NetFramework.csproj index 5fb4162b7a2d..53471f142b89 100644 --- a/sdk/test/IntegrationTests/AWSSDK.IntegrationTestUtilities.NetFramework.csproj +++ b/sdk/test/IntegrationTests/AWSSDK.IntegrationTestUtilities.NetFramework.csproj @@ -16,9 +16,23 @@ false false true + true CS1591,CS0612,CS0618 true + + + + + ../../awssdk.dll.snk + + + + + $(AWSKeyFile) + + + diff --git a/sdk/test/Services/S3/IntegrationTests/AWSSDK.IntegrationTests.S3.NetFramework.csproj b/sdk/test/Services/S3/IntegrationTests/AWSSDK.IntegrationTests.S3.NetFramework.csproj index 09d7ecd49090..832f59d0fece 100644 --- a/sdk/test/Services/S3/IntegrationTests/AWSSDK.IntegrationTests.S3.NetFramework.csproj +++ b/sdk/test/Services/S3/IntegrationTests/AWSSDK.IntegrationTests.S3.NetFramework.csproj @@ -1,67 +1,81 @@  - net472 - $(DefineConstants);DEBUG;TRACE;BCL;ASYNC_AWAIT;LOCAL_FILE - portable - false - AWSSDK.IntegrationTests.S3.NetFramework - AWSSDK.IntegrationTests.S3.NetFramework + net472 + $(DefineConstants);DEBUG;TRACE;BCL;ASYNC_AWAIT;LOCAL_FILE + portable + false + AWSSDK.IntegrationTests.S3.NetFramework + 
AWSSDK.IntegrationTests.S3.NetFramework - false - false - false - false - false - false - false - false - true - true - CS1591,CS0612,CS0618 + false + false + false + false + false + false + false + false + true + true + true + CS1591,CS0612,CS0618 + + + + + ../../../../awssdk.dll.snk + + + + + $(AWSKeyFile) + + + - - - + + + - - - + + + - - - + + + - - - - - - - + + + + + + + - - - - - + + + + + - + - + \ No newline at end of file diff --git a/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs b/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs index bc42edaba5e3..e3f1fa4a2272 100644 --- a/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs +++ b/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs @@ -2571,6 +2571,118 @@ public override long Length } } } + + [TestMethod] + [TestCategory("S3")] + public async Task UploadDirectoryFailurePolicy_ContinueOnFailure_AllFailures() + { + var nonExistentBucket = "non-existent-" + Guid.NewGuid().ToString("N"); + var directory = CreateTestDirectory(1 * KILO_SIZE, numberOfTestFiles: 3); + + try + { + using (var transferUtility = new TransferUtility(Client)) + { + var request = new TransferUtilityUploadDirectoryRequest + { + BucketName = nonExistentBucket, + Directory = directory.FullName, + SearchPattern = "*", + SearchOption = SearchOption.AllDirectories, + FailurePolicy = FailurePolicy.ContinueOnFailure, + UploadFilesConcurrently = true + }; + + // ContinueOnFailure should not throw even if all uploads fail + var config = new TransferUtilityConfig(); + var command = new Amazon.S3.Transfer.Internal.UploadDirectoryCommand(transferUtility, config, request); + command.UploadFilesConcurrently = request.UploadFilesConcurrently; + var response = await command.ExecuteAsync(CancellationToken.None).ConfigureAwait(false); + + Assert.IsNotNull(response); + Assert.AreEqual(0, response.ObjectsUploaded); + Assert.AreEqual(3, response.ObjectsFailed); + Assert.AreEqual(DirectoryResult.Failure, response.Result); + } + } + finally + { + try { Directory.Delete(directory.FullName, true); } catch { } + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task UploadDirectoryFailurePolicy_ContinueOnFailure_AllSuccess() + { + var directory = CreateTestDirectory(1 * KILO_SIZE, numberOfTestFiles: 3); + try + { + using (var transferUtility = new TransferUtility(Client)) + { + var request = new TransferUtilityUploadDirectoryRequest + { + BucketName = bucketName, + Directory = directory.FullName, + KeyPrefix = directory.Name, + SearchPattern = "*", + SearchOption = SearchOption.AllDirectories, + FailurePolicy = FailurePolicy.ContinueOnFailure, + UploadFilesConcurrently = true + }; + + var config = new TransferUtilityConfig(); + var command = new Amazon.S3.Transfer.Internal.UploadDirectoryCommand(transferUtility, config, request); + command.UploadFilesConcurrently = request.UploadFilesConcurrently; + var response = await command.ExecuteAsync(CancellationToken.None).ConfigureAwait(false); + + Assert.IsNotNull(response); + Assert.AreEqual(3, response.ObjectsUploaded); + Assert.AreEqual(0, response.ObjectsFailed); + Assert.AreEqual(DirectoryResult.Success, response.Result); + + // Validate uploaded contents + ValidateDirectoryContents(Client, bucketName, directory.Name, directory, plainTextContentType); + } + } + finally + { + try { Directory.Delete(directory.FullName, true); } catch { } + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task UploadDirectoryFailurePolicy_AbortOnFailure_Throws() + { + var 
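/* a freshly generated bucket name that does not exist, so every PutObject call is guaranteed to fail */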
nonExistentBucket = "non-existent-" + Guid.NewGuid().ToString("N"); + var directory = CreateTestDirectory(1 * KILO_SIZE, numberOfTestFiles: 2); + + try + { + using (var transferUtility = new TransferUtility(Client)) + { + var request = new TransferUtilityUploadDirectoryRequest + { + BucketName = nonExistentBucket, + Directory = directory.FullName, + SearchPattern = "*", + SearchOption = SearchOption.AllDirectories, + FailurePolicy = FailurePolicy.AbortOnFailure, + UploadFilesConcurrently = true + }; + + var config = new TransferUtilityConfig(); + var command = new Amazon.S3.Transfer.Internal.UploadDirectoryCommand(transferUtility, config, request); + command.UploadFilesConcurrently = request.UploadFilesConcurrently; + await Assert.ThrowsExceptionAsync(() => command.ExecuteAsync(CancellationToken.None)); + } + } + finally + { + try { Directory.Delete(directory.FullName, true); } catch { } + } + } } } diff --git a/sdk/test/Services/S3/UnitTests/Custom/FailurePolicyTests.cs b/sdk/test/Services/S3/UnitTests/Custom/FailurePolicyTests.cs index 2be179501ae9..1bbdce15284f 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/FailurePolicyTests.cs +++ b/sdk/test/Services/S3/UnitTests/Custom/FailurePolicyTests.cs @@ -1,7 +1,6 @@ using Amazon.S3; using Amazon.S3.Model; using Amazon.S3.Transfer; -using Amazon.S3.Transfer.Model; using Amazon.S3.Transfer.Internal; using Microsoft.VisualStudio.TestTools.UnitTesting; using Moq; @@ -30,6 +29,17 @@ private static TransferUtilityDownloadDirectoryRequest CreateRequest(string loca }; } + private static TransferUtilityUploadDirectoryRequest CreateUploadRequest(string localDir, FailurePolicy policy) + { + return new TransferUtilityUploadDirectoryRequest + { + BucketName = "test-bucket", + Directory = localDir, + FailurePolicy = policy, + UploadFilesConcurrently = true + }; + } + private static GetObjectResponse SuccessObject(string bucket, string key, string content = "data") { return new GetObjectResponse @@ -71,6 +81,29 @@ private static Mock CreateMockS3(IEnumerable keys, Func CreateMockS3ForUpload(IEnumerable keys, Func shouldFail) + { + var mock = new Mock(); + mock.Setup(m => m.Config).Returns(new AmazonS3Config()); + + foreach (var key in keys) + { + if (shouldFail(key)) + { + mock.Setup(m => m.PutObjectAsync(It.Is(r => r.Key == key && r.BucketName == "test-bucket"), It.IsAny())) + .ThrowsAsync(new AmazonS3Exception("Simulated failure for " + key)); + } + else + { + mock.Setup(m => m.PutObjectAsync(It.Is(r => r.Key == key && r.BucketName == "test-bucket"), It.IsAny())) + .ReturnsAsync(new PutObjectResponse()); + } + } + + mock.Setup(m => m.Dispose()); + return mock; + } + private static string CreateTempDirectory() { string dir = Path.Combine(Path.GetTempPath(), "FailurePolicyTests", Guid.NewGuid().ToString()); @@ -336,5 +369,260 @@ public async Task DownloadDirectory_ObjectDownloadFailedEvent_ArgsContainExpecte try { Directory.Delete(localDir, true); } catch { } } } + + [TestMethod] + [TestCategory("S3")] + public async Task UploadDirectory_ContinueOnFailure_PartialSuccess() + { + var fileNames = new[] { "file1.txt", "file2.txt", "file3.txt" }; + string localDir = CreateTempDirectory(); + try + { + // create files + foreach (var f in fileNames) + { + File.WriteAllText(Path.Combine(localDir, f), "data"); + } + + var mockS3 = CreateMockS3ForUpload(fileNames, k => k.EndsWith("file2.txt", StringComparison.Ordinal)); + var cancellationToken = new CancellationToken(); + var config = new TransferUtilityConfig(); + var tu = new TransferUtility(mockS3.Object); + var 
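/* ContinueOnFailure records each failed object in response.Errors and keeps going;
   AbortOnFailure, exercised further below, rethrows the first failure instead */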
request = CreateUploadRequest(localDir, FailurePolicy.ContinueOnFailure); + var command = new UploadDirectoryCommand(tu, config, request); + command.UploadFilesConcurrently = request.UploadFilesConcurrently; + var response = await command.ExecuteAsync(cancellationToken).ConfigureAwait(false); + + Assert.IsNotNull(response); + Assert.AreEqual(2, response.ObjectsUploaded); + Assert.AreEqual(1, response.ObjectsFailed); + Assert.AreEqual(DirectoryResult.PartialSuccess, response.Result); + Assert.IsNotNull(response.Errors); + Assert.AreEqual(1, response.Errors.Count); + // local files remain + Assert.IsTrue(File.Exists(Path.Combine(localDir, "file1.txt"))); + Assert.IsTrue(File.Exists(Path.Combine(localDir, "file3.txt"))); + Assert.IsTrue(File.Exists(Path.Combine(localDir, "file2.txt"))); + } + finally + { + try { Directory.Delete(localDir, true); } catch { } + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task UploadDirectory_ContinueOnFailure_AllFailures() + { + var fileNames = new[] { "fileA.txt", "fileB.txt" }; + string localDir = CreateTempDirectory(); + try + { + foreach (var f in fileNames) + File.WriteAllText(Path.Combine(localDir, f), "data"); + + var mockS3 = CreateMockS3ForUpload(fileNames, k => true); + var cancellationToken = new CancellationToken(); + var config = new TransferUtilityConfig(); + var tu = new TransferUtility(mockS3.Object); + var request = CreateUploadRequest(localDir, FailurePolicy.ContinueOnFailure); + var command = new UploadDirectoryCommand(tu, config, request); + command.UploadFilesConcurrently = request.UploadFilesConcurrently; + var response = await command.ExecuteAsync(cancellationToken).ConfigureAwait(false); + + Assert.IsNotNull(response); + Assert.AreEqual(0, response.ObjectsUploaded); + Assert.AreEqual(2, response.ObjectsFailed); + Assert.AreEqual(DirectoryResult.Failure, response.Result); + Assert.IsNotNull(response.Errors); + Assert.AreEqual(2, response.Errors.Count); + } + finally + { + try { Directory.Delete(localDir, true); } catch { } + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task UploadDirectory_ContinueOnFailure_AllSuccess() + { + var fileNames = new[] { "ok1.txt", "ok2.txt" }; + string localDir = CreateTempDirectory(); + try + { + foreach (var f in fileNames) + File.WriteAllText(Path.Combine(localDir, f), "data"); + + var mockS3 = CreateMockS3ForUpload(fileNames, k => false); + var cancellationToken = new CancellationToken(); + var config = new TransferUtilityConfig(); + var tu = new TransferUtility(mockS3.Object); + var request = CreateUploadRequest(localDir, FailurePolicy.ContinueOnFailure); + var command = new UploadDirectoryCommand(tu, config, request); + command.UploadFilesConcurrently = request.UploadFilesConcurrently; + var response = await command.ExecuteAsync(cancellationToken).ConfigureAwait(false); + + Assert.IsNotNull(response); + Assert.AreEqual(2, response.ObjectsUploaded); + Assert.AreEqual(0, response.ObjectsFailed); + Assert.AreEqual(DirectoryResult.Success, response.Result); + } + finally + { + try { Directory.Delete(localDir, true); } catch { } + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task UploadDirectory_AbortOnFailure_ThrowsOnFirstFailure() + { + var fileNames = new[] { "first.txt", "second.txt" }; + string localDir = CreateTempDirectory(); + try + { + foreach (var f in fileNames) + File.WriteAllText(Path.Combine(localDir, f), "data"); + + var mockS3 = CreateMockS3ForUpload(fileNames, k => k.EndsWith("second.txt", StringComparison.Ordinal)); + var tu = new 
TransferUtility(mockS3.Object); + var request = CreateUploadRequest(localDir, FailurePolicy.AbortOnFailure); + + var ex = await Assert.ThrowsExceptionAsync(() => tu.UploadDirectoryAsync(request)); + Assert.IsTrue(ex.Message.Contains("second.txt")); + // first file may or may not have uploaded depending on timing; ensure at least one file attempt occurred + Assert.IsTrue(Directory.GetFiles(localDir).Length >= 1); + } + finally + { + try { Directory.Delete(localDir, true); } catch { } + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task UploadDirectory_ObjectUploadFailedEvent_CancelInHandler_ContinueOnFailure_Throws() + { + var fileNames = new[] { "file1.txt", "file2.txt", "file3.txt" }; + string localDir = CreateTempDirectory(); + try + { + foreach (var f in fileNames) + File.WriteAllText(Path.Combine(localDir, f), "data"); + + var mockS3 = CreateMockS3ForUpload(fileNames, k => k.EndsWith("file2.txt", StringComparison.Ordinal)); + var tu = new TransferUtility(mockS3.Object); + var request = CreateUploadRequest(localDir, FailurePolicy.ContinueOnFailure); + // Make sequential to make behavior deterministic for the test. + request.UploadFilesConcurrently = false; + + bool handlerInvoked = false; + request.ObjectUploadFailedEvent += (sender, args) => + { + handlerInvoked = true; + throw new AmazonS3Exception("Stop processing immediately"); + }; + + var ex = await Assert.ThrowsExceptionAsync(() => tu.UploadDirectoryAsync(request)); + Assert.IsTrue(ex.Message.Equals("Stop processing immediately")); + + Assert.IsTrue(handlerInvoked, "ObjectUploadFailedEvent handler was not invoked."); + } + finally + { + try { Directory.Delete(localDir, true); } catch { } + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task UploadDirectory_ObjectUploadFailedEvent_ArgsContainExpectedData_ContinueOnFailure() + { + var fileNames = new[] { "a.txt", "b.txt" }; + string localDir = CreateTempDirectory(); + try + { + foreach (var f in fileNames) + File.WriteAllText(Path.Combine(localDir, f), "data"); + + var mockS3 = CreateMockS3ForUpload(new[] { "a.txt", "b.txt" }, k => k.EndsWith("b.txt", StringComparison.Ordinal)); + var config = new TransferUtilityConfig(); + var request = CreateUploadRequest(localDir, FailurePolicy.ContinueOnFailure); + // collect events + var captured = new List(); + var invoked = new ManualResetEventSlim(false); + request.ObjectUploadFailedEvent += (sender, args) => + { + captured.Add(args); + invoked.Set(); + }; + + var tu = new TransferUtility(mockS3.Object); + var command = new UploadDirectoryCommand(tu, config, request); + command.UploadFilesConcurrently = request.UploadFilesConcurrently; + var response = await command.ExecuteAsync(CancellationToken.None).ConfigureAwait(false); + + // wait briefly for any background event dispatch + invoked.Wait(1000); + + Assert.IsNotNull(response); + Assert.AreEqual(1, response.ObjectsFailed); + Assert.AreEqual(1, captured.Count); + + var evt = captured[0]; + Assert.AreSame(request, evt.DirectoryRequest); + Assert.IsNotNull(evt.ObjectRequest); + Assert.IsTrue(evt.ObjectRequest.Key.EndsWith("b.txt", StringComparison.Ordinal)); + Assert.IsNotNull(evt.Exception); + Assert.IsTrue(evt.Exception.Message.Contains("Simulated failure for")); + } + finally + { + try { Directory.Delete(localDir, true); } catch { } + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task UploadDirectory_ObjectUploadFailedEvent_ArgsContainExpectedData_AbortOnFailure() + { + var fileNames = new[] { "x.txt", "y.txt" }; + string localDir = 
CreateTempDirectory(); + try + { + foreach (var f in fileNames) + File.WriteAllText(Path.Combine(localDir, f), "data"); + + var mockS3 = CreateMockS3ForUpload(new[] { "x.txt", "y.txt" }, k => k.EndsWith("y.txt", StringComparison.Ordinal)); + var request = CreateUploadRequest(localDir, FailurePolicy.AbortOnFailure); + var captured = new List(); + var invoked = new ManualResetEventSlim(false); + + request.ObjectUploadFailedEvent += (sender, args) => + { + captured.Add(args); + invoked.Set(); + }; + + var tu = new TransferUtility(mockS3.Object); + await Assert.ThrowsExceptionAsync(() => tu.UploadDirectoryAsync(request)); + + // wait for event + invoked.Wait(1000); + + Assert.AreEqual(1, captured.Count); + var evt = captured[0]; + Assert.AreSame(request, evt.DirectoryRequest); + Assert.IsNotNull(evt.ObjectRequest); + Assert.IsTrue(evt.ObjectRequest.Key.EndsWith("y.txt", StringComparison.Ordinal)); + Assert.IsNotNull(evt.Exception); + Assert.IsTrue(evt.Exception.Message.Contains("Simulated failure for")); + } + finally + { + try { Directory.Delete(localDir, true); } catch { } + } + } } } diff --git a/sdk/test/Services/S3/UnitTests/Custom/UploadDirectoryCommandTests.cs b/sdk/test/Services/S3/UnitTests/Custom/UploadDirectoryCommandTests.cs new file mode 100644 index 000000000000..f2e30d440455 --- /dev/null +++ b/sdk/test/Services/S3/UnitTests/Custom/UploadDirectoryCommandTests.cs @@ -0,0 +1,227 @@ +using Amazon.S3; +using Amazon.S3.Model; +using Amazon.S3.Transfer; +using Amazon.S3.Transfer.Internal; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Moq; +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; +using AWSSDK_DotNet.IntegrationTests.Utils; + +namespace AWSSDK.UnitTests +{ + [TestClass] + public class UploadDirectoryCommandTests + { + private string _testDirectory; + private Mock _mockS3Client; + private TransferUtilityConfig _config; + + [TestInitialize] + public void Setup() + { + _testDirectory = Path.Combine(Path.GetTempPath(), "UploadDirectoryCommandTests_" + Guid.NewGuid().ToString("N").Substring(0, 8)); + Directory.CreateDirectory(_testDirectory); + + // Create some test files + File.WriteAllBytes(Path.Combine(_testDirectory, "file1.dat"), GenerateTestData(1024)); + File.WriteAllBytes(Path.Combine(_testDirectory, "file2.dat"), GenerateTestData(1024)); + File.WriteAllBytes(Path.Combine(_testDirectory, "file3.dat"), GenerateTestData(1024)); + File.WriteAllBytes(Path.Combine(_testDirectory, "file4.dat"), GenerateTestData(1024)); + File.WriteAllBytes(Path.Combine(_testDirectory, "file5.dat"), GenerateTestData(1024)); + + _mockS3Client = new Mock(); + _config = new TransferUtilityConfig + { + ConcurrentServiceRequests = 4 + }; + + var s3Config = new AmazonS3Config + { + BufferSize = 8192, + }; + _mockS3Client.Setup(c => c.Config).Returns(s3Config); + } + + [TestCleanup] + public void Cleanup() + { + if (Directory.Exists(_testDirectory)) + { + try + { + Directory.Delete(_testDirectory, true); + } + catch + { + // Ignore cleanup errors in tests + } + } + } + + #region Concurrency Control Tests + + /// + /// Tests that ConcurrentServiceRequests setting actually limits concurrent file uploads. 
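+        /// The mocked PutObjectAsync increments a lock-protected counter on entry, records the
+        /// maximum value observed, and decrements on exit, so any breach of the limit is detected.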
+ /// Expected: Max 2 concurrent uploads (ConcurrentServiceRequests = 2) + /// + [TestMethod] + public async Task ExecuteAsync_ConcurrentServiceRequests_RespectsLimit() + { + // Arrange + var request = CreateUploadDirectoryRequest(); + request.UploadFilesConcurrently = true; + + var config = new TransferUtilityConfig + { + ConcurrentServiceRequests = 2 + }; + + var currentConcurrentUploads = 0; + var maxObservedConcurrency = 0; + var concurrencyLock = new object(); + + // Map filenames to sizes + var files = Directory.GetFiles(_testDirectory).ToDictionary(Path.GetFileName, f => new FileInfo(f).Length); + + // Mock PutObjectAsync to track concurrency + _mockS3Client.Setup(c => c.PutObjectAsync( + It.IsAny(), + It.IsAny())) + .Returns(async (PutObjectRequest req, CancellationToken ct) => + { + lock (concurrencyLock) + { + currentConcurrentUploads++; + maxObservedConcurrency = Math.Max(maxObservedConcurrency, currentConcurrentUploads); + } + + try + { + await Task.Delay(100, ct); + var fileName = Path.GetFileName(req.FilePath); + var fileSize = files[fileName]; + return new PutObjectResponse + { + ETag = "\"test-etag\"", + HttpStatusCode = System.Net.HttpStatusCode.OK, + }; + } + finally + { + lock (concurrencyLock) + { + currentConcurrentUploads--; + } + } + }); + + var utility = new TransferUtility(_mockS3Client.Object, config); + var command = new UploadDirectoryCommand(utility, config, request); + + // Act + await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.AreEqual(2, config.ConcurrentServiceRequests, "Test setup verification"); + Assert.IsTrue(maxObservedConcurrency <= config.ConcurrentServiceRequests, + $"Max concurrent uploads ({maxObservedConcurrency}) should not exceed ConcurrentServiceRequests ({config.ConcurrentServiceRequests})"); + } + + /// + /// Tests that sequential mode (UploadFilesConcurrently = false) uploads only one file at a time. 
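+        /// ConcurrentServiceRequests is deliberately set high (10) so that it is the sequential
+        /// flag, not the request limit, that serializes the uploads.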
+ /// Expected: Max 1 concurrent upload (sequential mode) + /// + [TestMethod] + public async Task ExecuteAsync_SequentialMode_UploadsOneAtATime() + { + // Arrange + var request = CreateUploadDirectoryRequest(); + request.UploadFilesConcurrently = false; + + var config = new TransferUtilityConfig + { + ConcurrentServiceRequests = 10 + }; + + var currentConcurrentUploads = 0; + var maxObservedConcurrency = 0; + var concurrencyLock = new object(); + + var files = Directory.GetFiles(_testDirectory).Take(3).ToDictionary(Path.GetFileName, f => new FileInfo(f).Length); + + // Mock PutObjectAsync to track concurrency + _mockS3Client.Setup(c => c.PutObjectAsync( + It.IsAny(), + It.IsAny())) + .Returns(async (PutObjectRequest req, CancellationToken ct) => + { + lock (concurrencyLock) + { + currentConcurrentUploads++; + maxObservedConcurrency = Math.Max(maxObservedConcurrency, currentConcurrentUploads); + } + + try + { + await Task.Delay(50, ct); + return new PutObjectResponse + { + ETag = "\"test-etag\"", + HttpStatusCode = System.Net.HttpStatusCode.OK, + }; + } + finally + { + lock (concurrencyLock) + { + currentConcurrentUploads--; + } + } + }); + + var utility = new TransferUtility(_mockS3Client.Object, config); + var command = new UploadDirectoryCommand(utility, config, request); + + // Act + await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.AreEqual(1, maxObservedConcurrency, + $"Sequential mode should only upload 1 file at a time, but observed {maxObservedConcurrency}"); + } + + #endregion + + #region Helper Methods + + private TransferUtilityUploadDirectoryRequest CreateUploadDirectoryRequest( + string bucketName = "test-bucket", + string s3Directory = "prefix", + string localDirectory = null) + { + localDirectory = localDirectory ?? _testDirectory; + + return new TransferUtilityUploadDirectoryRequest + { + BucketName = bucketName, + KeyPrefix = s3Directory, + Directory = localDirectory + }; + } + + private byte[] GenerateTestData(int size) + { + var data = new byte[size]; + var random = new Random(42); // Fixed seed for reproducible tests + random.NextBytes(data); + return data; + } + + #endregion + } +} From 6a31b2524e2005b91f80f024b54993f559f85da9 Mon Sep 17 00:00:00 2001 From: Garrett Beatty Date: Wed, 3 Dec 2025 11:11:34 -0500 Subject: [PATCH 36/56] Fix download directory concurrency issue and refactor (#4180) --- .../Internal/DownloadDirectoryCommand.cs | 14 +- .../Custom/Transfer/Internal/TaskHelpers.cs | 104 ++++- .../DownloadDirectoryCommand.cs | 411 ++++++++++++------ .../Custom/DownloadDirectoryCommandTests.cs | 210 ++++++++- .../S3/UnitTests/Custom/FailurePolicyTests.cs | 377 ++++++++++++++++ 5 files changed, 962 insertions(+), 154 deletions(-) diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadDirectoryCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadDirectoryCommand.cs index 687167ab1dc1..77f868a520fa 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadDirectoryCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadDirectoryCommand.cs @@ -48,12 +48,7 @@ internal partial class DownloadDirectoryCommand : BaseCommand internal static class TaskHelpers { + private static Logger Logger + { + get { return Logger.GetLogger(typeof(TaskHelpers)); } + } + /// /// Waits for all tasks to complete or till any task fails or is canceled. 
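        /// Unlike Task.WhenAll, the pending tasks are awaited one at a time via Task.WhenAny, so the
        /// first exception propagates as soon as its task completes instead of after all tasks finish.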
/// @@ -33,7 +41,10 @@ internal static class TaskHelpers internal static async Task WhenAllOrFirstExceptionAsync(List pendingTasks, CancellationToken cancellationToken) { int processed = 0; - int total = pendingTasks.Count; + int total = pendingTasks.Count; + + Logger.DebugFormat("TaskHelpers.WhenAllOrFirstExceptionAsync: Starting with TotalTasks={0}", total); + while (processed < total) { cancellationToken.ThrowIfCancellationRequested(); @@ -48,7 +59,12 @@ await completedTask pendingTasks.Remove(completedTask); processed++; + + Logger.DebugFormat("TaskHelpers.WhenAllOrFirstExceptionAsync: Task completed (Processed={0}/{1}, Remaining={2})", + processed, total, pendingTasks.Count); } + + Logger.DebugFormat("TaskHelpers.WhenAllOrFirstExceptionAsync: All tasks completed (Total={0})", total); } /// @@ -64,6 +80,9 @@ internal static async Task> WhenAllOrFirstExceptionAsync(List int processed = 0; int total = pendingTasks.Count; var responses = new List(); + + Logger.DebugFormat("TaskHelpers.WhenAllOrFirstExceptionAsync: Starting with TotalTasks={0}", total); + while (processed < total) { cancellationToken.ThrowIfCancellationRequested(); @@ -79,9 +98,92 @@ internal static async Task> WhenAllOrFirstExceptionAsync(List pendingTasks.Remove(completedTask); processed++; + + Logger.DebugFormat("TaskHelpers.WhenAllOrFirstExceptionAsync: Task completed (Processed={0}/{1}, Remaining={2})", + processed, total, pendingTasks.Count); } + Logger.DebugFormat("TaskHelpers.WhenAllOrFirstExceptionAsync: All tasks completed (Total={0})", total); + return responses; } + + /// + /// Executes work items with limited concurrency using a task pool pattern. + /// Creates only as many tasks as the concurrency limit allows, rather than creating + /// all tasks upfront. This reduces memory overhead for large collections. + /// + /// + /// This method provides a clean way to limit concurrent operations without creating + /// all tasks upfront. It maintains a pool of active tasks up to the maxConcurrency limit, + /// replacing completed tasks with new ones until all items are processed. + /// The caller is responsible for implementing failure handling within the processAsync function. + /// + /// The type of items to process + /// The collection of items to process + /// Maximum number of concurrent tasks + /// Async function to process each item + /// Cancellation token to observe + /// A task that completes when all items are processed, or throws on first failure + internal static async Task ForEachWithConcurrencyAsync( + IEnumerable items, + int maxConcurrency, + Func processAsync, + CancellationToken cancellationToken) + { + var itemList = items as IList ?? 
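/* A minimal usage sketch of this helper (illustrative only; filePaths,
   UploadFileAsync, and cancellationToken are hypothetical stand-ins, not part
   of this change):

       await TaskHelpers.ForEachWithConcurrencyAsync(
           filePaths,
           maxConcurrency: 4,
           async (path, ct) => await UploadFileAsync(path, ct),
           cancellationToken);

   At most four uploads run at once; a fifth starts only when one of the
   active four completes. */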
items.ToList(); + if (itemList.Count == 0) + { + Logger.DebugFormat("TaskHelpers.ForEachWithConcurrencyAsync: No items to process"); + return; + } + + Logger.DebugFormat("TaskHelpers.ForEachWithConcurrencyAsync: Starting with TotalItems={0}, MaxConcurrency={1}", + itemList.Count, maxConcurrency); + + int nextIndex = 0; + var activeTasks = new List(); + + // Start initial batch up to concurrency limit + int initialBatchSize = Math.Min(maxConcurrency, itemList.Count); + Logger.DebugFormat("TaskHelpers.ForEachWithConcurrencyAsync: Starting initial batch of {0} tasks", initialBatchSize); + + for (int i = 0; i < initialBatchSize; i++) + { + var task = processAsync(itemList[nextIndex++], cancellationToken); + activeTasks.Add(task); + } + + // Process completions and start new tasks until all work is done + while (activeTasks.Count > 0) + { + cancellationToken.ThrowIfCancellationRequested(); + + var completedTask = await Task.WhenAny(activeTasks) + .ConfigureAwait(continueOnCapturedContext: false); + + // Propagate exceptions (fail-fast behavior by default) + // Caller's processAsync function should handle failure policy if needed + await completedTask + .ConfigureAwait(continueOnCapturedContext: false); + + activeTasks.Remove(completedTask); + + int itemsCompleted = nextIndex - activeTasks.Count; + Logger.DebugFormat("TaskHelpers.ForEachWithConcurrencyAsync: Task completed (Active={0}, Completed={1}/{2}, Remaining={3})", + activeTasks.Count, itemsCompleted, itemList.Count, itemList.Count - itemsCompleted); + + // Start next task if more work remains + if (nextIndex < itemList.Count) + { + Logger.DebugFormat("TaskHelpers.ForEachWithConcurrencyAsync: Starting next task (Index={0}/{1}, Active={2})", + nextIndex + 1, itemList.Count, activeTasks.Count + 1); + var nextTask = processAsync(itemList[nextIndex++], cancellationToken); + activeTasks.Add(nextTask); + } + } + + Logger.DebugFormat("TaskHelpers.ForEachWithConcurrencyAsync: All items processed (Total={0})", itemList.Count); + } } } diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs index 7c19d78ed2c3..f9a44ec2b83a 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs @@ -24,6 +24,7 @@ using System.Threading; using System.Threading.Tasks; using Amazon.Runtime; +using Amazon.Runtime.Internal.Util; using Amazon.Util.Internal; namespace Amazon.S3.Transfer.Internal @@ -34,177 +35,304 @@ internal partial class DownloadDirectoryCommand : BaseCommand ExecuteAsync(CancellationToken cancellationToken) { + Logger.DebugFormat("DownloadDirectoryCommand.ExecuteAsync: Starting - DownloadFilesConcurrently={0}, UseMultipartDownload={1}, ConcurrentServiceRequests={2}", + DownloadFilesConcurrently, this._useMultipartDownload, this._config.ConcurrentServiceRequests); + + // Step 1: Validate and setup ValidateRequest(); EnsureDirectoryExists(new DirectoryInfo(this._request.LocalDirectory)); + // Step 2: List S3 objects + var (s3Objects, prefixLength) = await ListS3ObjectsAsync(cancellationToken) + .ConfigureAwait(false); + + this._totalNumberOfFilesToDownload = s3Objects.Count; + Logger.DebugFormat("DownloadDirectoryCommand.ExecuteAsync: Found {0} total objects, TotalBytes={1}", + s3Objects.Count, this._totalBytes); + + // Step 3: Filter to actual files (exclude directory markers) + 
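// keys that end with "/" denote folder placeholder objects, which have no file content to download
            + 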
var objectsToDownload = FilterObjectsToDownload(s3Objects); + + // Step 4: Setup resources and execute downloads + using (var resources = CreateDownloadResources(cancellationToken)) + { + await ExecuteParallelDownloadsAsync( + objectsToDownload, + prefixLength, + resources, + cancellationToken) + .ConfigureAwait(false); + } + + // Step 5: Build response + Logger.DebugFormat("DownloadDirectoryCommand.ExecuteAsync: Completed - ObjectsDownloaded={0}, ObjectsFailed={1}", + _numberOfFilesDownloaded, _errors.Count); + + return BuildResponse(); + } + + /// + /// Encapsulates disposable resources used during directory download. + /// + private sealed class DownloadResources : IDisposable + { + public SemaphoreSlim HttpRequestThrottler { get; } + public CancellationTokenSource InternalCancellationTokenSource { get; } + + public DownloadResources( + SemaphoreSlim httpRequestThrottler, + CancellationTokenSource cancellationTokenSource) + { + HttpRequestThrottler = httpRequestThrottler; + InternalCancellationTokenSource = cancellationTokenSource; + } + + public void Dispose() + { + InternalCancellationTokenSource?.Dispose(); + HttpRequestThrottler?.Dispose(); + } + } + + /// + /// Lists S3 objects to download and calculates prefix length. + /// + private async Task<(List objects, int prefixLength)> ListS3ObjectsAsync( + CancellationToken cancellationToken) + { List objs; string listRequestPrefix; + try { - ListObjectsRequest listRequest = ConstructListObjectRequest(); + var listRequest = ConstructListObjectRequest(); listRequestPrefix = listRequest.Prefix; - objs = await GetS3ObjectsToDownloadAsync(listRequest, cancellationToken).ConfigureAwait(false); + objs = await GetS3ObjectsToDownloadAsync(listRequest, cancellationToken) + .ConfigureAwait(false); } - catch (AmazonS3Exception ex) + catch (AmazonS3Exception ex) when (ex.StatusCode == System.Net.HttpStatusCode.NotImplemented) { - if (ex.StatusCode != System.Net.HttpStatusCode.NotImplemented) - throw; - - ListObjectsV2Request listRequestV2 = ConstructListObjectRequestV2(); + var listRequestV2 = ConstructListObjectRequestV2(); listRequestPrefix = listRequestV2.Prefix; - objs = await GetS3ObjectsToDownloadV2Async(listRequestV2, cancellationToken).ConfigureAwait(false); + objs = await GetS3ObjectsToDownloadV2Async(listRequestV2, cancellationToken) + .ConfigureAwait(false); } - this._totalNumberOfFilesToDownload = objs.Count; - - // Two-level throttling architecture: - // 1. File-level throttler: Controls how many files are downloaded concurrently - // 2. HTTP-level throttler: Controls total HTTP requests across ALL file downloads - // - // Example with ConcurrentServiceRequests = 10: - // - fileOperationThrottler = 10: Up to 10 files can download simultaneously - // - sharedHttpRequestThrottler = 10: All 10 files share 10 total HTTP request slots - // - Without HTTP throttler: Would result in 10 files × 10 parts = 100 concurrent HTTP requests - // - With HTTP throttler: Enforces 10 total concurrent HTTP requests across all files - // - // This prevents resource exhaustion when downloading many large files with multipart downloads. 
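/* Illustrative sketch of the shared HTTP throttler described above (not part of
   the diff; DownloadPartAsync is a hypothetical stand-in for one ranged GET):

       var httpThrottler = new SemaphoreSlim(config.ConcurrentServiceRequests);
       await httpThrottler.WaitAsync(ct);          // take one of the N global slots
       try { await DownloadPartAsync(part, ct); }  // one part of one file
       finally { httpThrottler.Release(); }        // free the slot for any file

   Every part request across every file competes for the same N slots, which is
   what caps total concurrent HTTP requests at ConcurrentServiceRequests. */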
- SemaphoreSlim fileOperationThrottler = null; - SemaphoreSlim sharedHttpRequestThrottler = null; - CancellationTokenSource internalCts = null; + // Calculate prefix length + int prefixLength = listRequestPrefix.Length; + if (_request.DisableSlashCorrection && !listRequestPrefix.EndsWith("/")) + { + prefixLength = listRequestPrefix.LastIndexOf("/") + 1; + } - try + return (objs, prefixLength); + } + + /// + /// Filters out directory markers (keys ending with "/") from S3 objects list. + /// + private List FilterObjectsToDownload(List s3Objects) + { + var filtered = s3Objects + .Where(s3o => !s3o.Key.EndsWith("/", StringComparison.Ordinal)) + .ToList(); + + Logger.DebugFormat("DownloadDirectoryCommand.FilterObjectsToDownload: Filtered to {0} files to download (excluded {1} directory markers)", + filtered.Count, s3Objects.Count - filtered.Count); + + return filtered; + } + + /// + /// Creates resources needed for parallel downloads with proper throttling. + /// Throttling architecture: + /// - Task pool pattern (ForEachWithConcurrencyAsync): Controls concurrent file downloads + /// - HttpRequestThrottler: Controls total HTTP requests across ALL file downloads + /// + /// Example with ConcurrentServiceRequests = 10: + /// - Task pool creates max 10 concurrent file download tasks + /// - HttpRequestThrottler = 10: All files share 10 total HTTP request slots + /// - Without HTTP throttler: 10 multipart files × 10 parts = 100 concurrent HTTP requests + /// - With HTTP throttler: Enforces 10 total concurrent HTTP requests across all files + /// + /// This prevents resource exhaustion when downloading many large files with multipart downloads. + /// + private DownloadResources CreateDownloadResources(CancellationToken cancellationToken) + { + SemaphoreSlim httpRequestThrottler = null; + + // HTTP-level throttler: Shared across all downloads to control total HTTP concurrency + // Only needed for multipart downloads where each file makes multiple HTTP requests + if (this._useMultipartDownload) { - // File-level throttler: Controls concurrent file operations - fileOperationThrottler = DownloadFilesConcurrently ? - new SemaphoreSlim(this._config.ConcurrentServiceRequests) : - new SemaphoreSlim(1); - - // HTTP-level throttler: Shared across all downloads to control total HTTP concurrency - // Only needed for multipart downloads where each file makes multiple HTTP requests - if (this._useMultipartDownload) - { - sharedHttpRequestThrottler = new SemaphoreSlim(this._config.ConcurrentServiceRequests); - } + httpRequestThrottler = new SemaphoreSlim(this._config.ConcurrentServiceRequests); + Logger.DebugFormat("DownloadDirectoryCommand.CreateDownloadResources: Created HTTP throttler with MaxConcurrentRequests={0}", + this._config.ConcurrentServiceRequests); + } + + var internalCts = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken); + + return new DownloadResources(httpRequestThrottler, internalCts); + } - internalCts = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken); - var pendingTasks = new List(); - foreach (S3Object s3o in objs) + /// + /// Executes parallel downloads of all S3 objects using task pool pattern. + /// Only creates as many tasks as the concurrency limit allows (not all files up front). + /// + private async Task ExecuteParallelDownloadsAsync( + List objectsToDownload, + int prefixLength, + DownloadResources resources, + CancellationToken cancellationToken) + { + int concurrencyLevel = DownloadFilesConcurrently + ? 
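/* a pool of ConcurrentServiceRequests tasks in concurrent mode, otherwise a pool of one, i.e. strictly sequential */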
this._config.ConcurrentServiceRequests + : 1; + + Logger.DebugFormat("DownloadDirectoryCommand.ExecuteParallelDownloadsAsync: Starting task pool with ConcurrencyLevel={0}, TotalFiles={1}", + concurrencyLevel, objectsToDownload.Count); + + await TaskHelpers.ForEachWithConcurrencyAsync( + objectsToDownload, + concurrencyLevel, + async (s3Object, ct) => { - if (s3o.Key.EndsWith("/", StringComparison.Ordinal)) - continue; + ct.ThrowIfCancellationRequested(); - await fileOperationThrottler.WaitAsync(cancellationToken) - .ConfigureAwait(continueOnCapturedContext: false); + await DownloadSingleFileAsync( + s3Object, + prefixLength, + resources.HttpRequestThrottler, + resources.InternalCancellationTokenSource) + .ConfigureAwait(false); + }, + cancellationToken) + .ConfigureAwait(false); - try - { - cancellationToken.ThrowIfCancellationRequested(); - if (internalCts.IsCancellationRequested) - { - // Operation cancelled as one of the download requests failed with an exception, - // don't schedule any more download tasks. - // Don't throw an OperationCanceledException here as we want to process the - // responses and throw the original exception. - break; - } + Logger.DebugFormat("DownloadDirectoryCommand.ExecuteParallelDownloadsAsync: Task pool completed - ObjectsDownloaded={0}, ObjectsFailed={1}", + _numberOfFilesDownloaded, _errors.Count); + } - // Valid for serial uploads when - // TransferUtilityDownloadDirectoryRequest.DownloadFilesConcurrently is set to false. - int prefixLength = listRequestPrefix.Length; + /// + /// Downloads a single S3 object to local file system with validation and failure handling. + /// + private async Task DownloadSingleFileAsync( + S3Object s3Object, + int prefixLength, + SemaphoreSlim httpRequestThrottler, + CancellationTokenSource internalCts) + { + if (internalCts.IsCancellationRequested) + return; - // If DisableSlashCorrection is enabled (i.e. S3Directory is a key prefix) and it doesn't end with '/' then we need the parent directory to properly construct download path. - if (_request.DisableSlashCorrection && !listRequestPrefix.EndsWith("/")) - { - prefixLength = listRequestPrefix.LastIndexOf("/") + 1; - } + this._currentFile = s3Object.Key.Substring(prefixLength); + var downloadRequest = ConstructTransferUtilityDownloadRequest(s3Object, prefixLength); - this._currentFile = s3o.Key.Substring(prefixLength); + // Create failure callback + Action onFailure = (ex) => + { + this._request.OnRaiseObjectDownloadFailedEvent( + new ObjectDownloadFailedEventArgs( + this._request, + downloadRequest, + ex)); + }; - TransferUtilityDownloadRequest downloadRequest = ConstructTransferUtilityDownloadRequest(s3o, prefixLength); + // Validate file path with failure policy + var isValid = await _failurePolicy.ExecuteAsync( + () => ValidateDownloadPath(downloadRequest.FilePath), + onFailure, + internalCts + ).ConfigureAwait(false); - Action onFailure = (ex) => - { - this._request.OnRaiseObjectDownloadFailedEvent( - new ObjectDownloadFailedEventArgs( - this._request, - downloadRequest, - ex)); - }; - - var isValid = await _failurePolicy.ExecuteAsync( - () => { - //Ensure the target file is a rooted within LocalDirectory. Otherwise error. 
- if(!InternalSDKUtils.IsFilePathRootedWithDirectoryPath(downloadRequest.FilePath, _request.LocalDirectory)) - { - throw new AmazonClientException($"The file {downloadRequest.FilePath} is not allowed outside of the target directory {_request.LocalDirectory}."); - } - - return Task.CompletedTask; - }, - onFailure, - internalCts - ).ConfigureAwait(false); - if (!isValid) continue; - - var task = _failurePolicy.ExecuteAsync( - async () => { - BaseCommand command; - if (this._useMultipartDownload) - { - command = new MultipartDownloadCommand(this._s3Client, downloadRequest, this._config, sharedHttpRequestThrottler); - } - else - { - command = new DownloadCommand(this._s3Client, downloadRequest); - } - await command.ExecuteAsync(internalCts.Token) - .ConfigureAwait(false); - }, - onFailure, - internalCts - ); - - pendingTasks.Add(task); - } - finally - { - fileOperationThrottler.Release(); - } - } - await TaskHelpers.WhenAllOrFirstExceptionAsync(pendingTasks, cancellationToken) - .ConfigureAwait(continueOnCapturedContext: false); + if (!isValid) + return; - return new TransferUtilityDownloadDirectoryResponse - { - ObjectsDownloaded = _numberOfFilesDownloaded, - ObjectsFailed = _errors.Count, - Errors = _errors.ToList(), - Result = _errors.Count == 0 ? - DirectoryResult.Success : - (_numberOfFilesDownloaded > 0 ? - DirectoryResult.PartialSuccess : - DirectoryResult.Failure) - }; + // Execute download with failure policy + await _failurePolicy.ExecuteAsync( + () => ExecuteDownloadCommandAsync(downloadRequest, httpRequestThrottler, internalCts.Token), + onFailure, + internalCts + ).ConfigureAwait(false); + } + + /// + /// Validates that the download path is within the target directory. + /// + private Task ValidateDownloadPath(string filePath) + { + if (!InternalSDKUtils.IsFilePathRootedWithDirectoryPath(filePath, _request.LocalDirectory)) + { + throw new AmazonClientException( + $"The file {filePath} is not allowed outside of the target directory {_request.LocalDirectory}."); } - finally + return Task.CompletedTask; + } + + /// + /// Creates and executes the appropriate download command for the file. + /// + private async Task ExecuteDownloadCommandAsync( + TransferUtilityDownloadRequest downloadRequest, + SemaphoreSlim httpRequestThrottler, + CancellationToken cancellationToken) + { + BaseCommand command; + + if (this._useMultipartDownload) + { + command = new MultipartDownloadCommand( + this._s3Client, + downloadRequest, + this._config, + httpRequestThrottler); + } + else { - internalCts.Dispose(); - fileOperationThrottler.Dispose(); - sharedHttpRequestThrottler?.Dispose(); + command = new DownloadCommand(this._s3Client, downloadRequest); } + + await command.ExecuteAsync(cancellationToken).ConfigureAwait(false); } + /// + /// Builds the response object based on download results. + /// + private TransferUtilityDownloadDirectoryResponse BuildResponse() + { + return new TransferUtilityDownloadDirectoryResponse + { + ObjectsDownloaded = _numberOfFilesDownloaded, + ObjectsFailed = _errors.Count, + Errors = _errors.ToList(), + Result = _errors.Count == 0 + ? DirectoryResult.Success + : (_numberOfFilesDownloaded > 0 + ? 
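/* at least one object succeeded and at least one failed */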
DirectoryResult.PartialSuccess + : DirectoryResult.Failure) + }; + } + + private async Task> GetS3ObjectsToDownloadAsync(ListObjectsRequest listRequest, CancellationToken cancellationToken) { + Logger.DebugFormat("DownloadDirectoryCommand.GetS3ObjectsToDownloadAsync: Starting object listing"); + List objs = new List(); + int pageCount = 0; do { ListObjectsResponse listResponse = await this._s3Client.ListObjectsAsync(listRequest, cancellationToken) @@ -222,13 +350,24 @@ private async Task> GetS3ObjectsToDownloadAsync(ListObjectsReques } } listRequest.Marker = listResponse.NextMarker; + pageCount++; + + Logger.DebugFormat("DownloadDirectoryCommand.GetS3ObjectsToDownloadAsync: Page {0} completed - ObjectsInPage={1}, TotalObjectsSoFar={2}", + pageCount, listResponse.S3Objects?.Count ?? 0, objs.Count); } while (!string.IsNullOrEmpty(listRequest.Marker)); + + Logger.DebugFormat("DownloadDirectoryCommand.GetS3ObjectsToDownloadAsync: Listing completed - TotalPages={0}, TotalObjects={1}", + pageCount, objs.Count); + return objs; } private async Task> GetS3ObjectsToDownloadV2Async(ListObjectsV2Request listRequestV2, CancellationToken cancellationToken) { + Logger.DebugFormat("DownloadDirectoryCommand.GetS3ObjectsToDownloadV2Async: Starting object listing (V2 API)"); + List objs = new List(); + int pageCount = 0; do { ListObjectsV2Response listResponse = await this._s3Client.ListObjectsV2Async(listRequestV2, cancellationToken) @@ -246,7 +385,15 @@ private async Task> GetS3ObjectsToDownloadV2Async(ListObjectsV2Re } } listRequestV2.ContinuationToken = listResponse.NextContinuationToken; + pageCount++; + + Logger.DebugFormat("DownloadDirectoryCommand.GetS3ObjectsToDownloadV2Async: Page {0} completed - ObjectsInPage={1}, TotalObjectsSoFar={2}", + pageCount, listResponse.S3Objects?.Count ?? 
0, objs.Count); } while (!string.IsNullOrEmpty(listRequestV2.ContinuationToken)); + + Logger.DebugFormat("DownloadDirectoryCommand.GetS3ObjectsToDownloadV2Async: Listing completed - TotalPages={0}, TotalObjects={1}", + pageCount, objs.Count); + return objs; } } diff --git a/sdk/test/Services/S3/UnitTests/Custom/DownloadDirectoryCommandTests.cs b/sdk/test/Services/S3/UnitTests/Custom/DownloadDirectoryCommandTests.cs index 4a813eca10ae..900acf89d93b 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/DownloadDirectoryCommandTests.cs +++ b/sdk/test/Services/S3/UnitTests/Custom/DownloadDirectoryCommandTests.cs @@ -53,7 +53,7 @@ public void Constructor_WithValidParameters_CreatesCommand() var request = CreateDownloadDirectoryRequest(); // Act - var command = new DownloadDirectoryCommand(_mockS3Client.Object, request); + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: false); // Assert Assert.IsNotNull(command); @@ -66,7 +66,7 @@ public void Constructor_WithUseMultipartDownload_CreatesCommand() var request = CreateDownloadDirectoryRequest(); // Act - var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, useMultipartDownload: true); + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: true); // Assert Assert.IsNotNull(command); @@ -93,7 +93,7 @@ public void Constructor_WithNullS3Client_ThrowsArgumentNullException() var request = CreateDownloadDirectoryRequest(); // Act - var command = new DownloadDirectoryCommand(null, request); + var command = new DownloadDirectoryCommand(null, request, _config, useMultipartDownload: false); } [TestMethod] @@ -101,7 +101,7 @@ public void Constructor_WithNullS3Client_ThrowsArgumentNullException() public void Constructor_WithNullRequest_ThrowsArgumentNullException() { // Act - var command = new DownloadDirectoryCommand(_mockS3Client.Object, null); + var command = new DownloadDirectoryCommand(_mockS3Client.Object, null, _config, useMultipartDownload: false); } #endregion @@ -115,7 +115,7 @@ public async Task ExecuteAsync_WithMissingBucketName_ThrowsInvalidOperationExcep // Arrange var request = CreateDownloadDirectoryRequest(); request.BucketName = null; - var command = new DownloadDirectoryCommand(_mockS3Client.Object, request); + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: false); // Act await command.ExecuteAsync(CancellationToken.None); @@ -128,7 +128,7 @@ public async Task ExecuteAsync_WithEmptyBucketName_ThrowsInvalidOperationExcepti // Arrange var request = CreateDownloadDirectoryRequest(); request.BucketName = ""; - var command = new DownloadDirectoryCommand(_mockS3Client.Object, request); + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: false); // Act await command.ExecuteAsync(CancellationToken.None); @@ -141,7 +141,7 @@ public async Task ExecuteAsync_WithMissingS3Directory_ThrowsInvalidOperationExce // Arrange var request = CreateDownloadDirectoryRequest(); request.S3Directory = null; - var command = new DownloadDirectoryCommand(_mockS3Client.Object, request); + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: false); // Act await command.ExecuteAsync(CancellationToken.None); @@ -154,7 +154,7 @@ public async Task ExecuteAsync_WithEmptyS3Directory_ThrowsInvalidOperationExcept // Arrange var request = CreateDownloadDirectoryRequest(); request.S3Directory = 
""; - var command = new DownloadDirectoryCommand(_mockS3Client.Object, request); + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: false); // Act await command.ExecuteAsync(CancellationToken.None); @@ -167,7 +167,7 @@ public async Task ExecuteAsync_WithMissingLocalDirectory_ThrowsInvalidOperationE // Arrange var request = CreateDownloadDirectoryRequest(); request.LocalDirectory = null; - var command = new DownloadDirectoryCommand(_mockS3Client.Object, request); + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: false); // Act await command.ExecuteAsync(CancellationToken.None); @@ -817,6 +817,198 @@ public async Task ExecuteAsync_ProgressEventsCancellation_StopsProgressTracking( #endregion + #region Concurrency Control Tests + + /// + /// Tests that ConcurrentServiceRequests setting actually limits concurrent file downloads. + /// This test will FAIL on the current broken implementation, demonstrating that + /// ConcurrentServiceRequests is not being respected. + /// + /// Expected: Max 2 concurrent downloads (ConcurrentServiceRequests = 2) + /// Actual (broken): 5 concurrent downloads (all files download simultaneously) + /// + [TestMethod] + public async Task ExecuteAsync_ConcurrentServiceRequests_RespectsLimit() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + request.DownloadFilesConcurrently = true; + + // Use a low limit to make violation obvious + var config = new TransferUtilityConfig + { + ConcurrentServiceRequests = 2 // Only 2 files should download simultaneously + }; + + // Track concurrent downloads using thread-safe counter + var currentConcurrentDownloads = 0; + var maxObservedConcurrency = 0; + var concurrencyLock = new object(); + + var files = new Dictionary + { + { "file1.dat", 5 * 1024 * 1024 }, // 5MB files + { "file2.dat", 5 * 1024 * 1024 }, + { "file3.dat", 5 * 1024 * 1024 }, + { "file4.dat", 5 * 1024 * 1024 }, + { "file5.dat", 5 * 1024 * 1024 } // 5 files total + }; + + // Setup directory listing + var listResponse = CreateListObjectsResponse(files); + _mockS3Client.Setup(c => c.ListObjectsAsync( + It.IsAny(), + It.IsAny())) + .ReturnsAsync(listResponse); + + // Override GetObjectAsync to track concurrency + _mockS3Client.Setup(c => c.GetObjectAsync( + It.IsAny(), + It.IsAny())) + .Returns(async (GetObjectRequest req, CancellationToken ct) => + { + // Increment counter when download starts + lock (concurrencyLock) + { + currentConcurrentDownloads++; + maxObservedConcurrency = Math.Max(maxObservedConcurrency, currentConcurrentDownloads); + Console.WriteLine($"Download started for {req.Key}. Current concurrent: {currentConcurrentDownloads}, Max observed: {maxObservedConcurrency}"); + } + + try + { + // Simulate some download time to ensure overlap + await Task.Delay(100, ct); + + // Return mock response + var fileName = req.Key.Split('/').Last(); + var fileSize = files[fileName]; + var data = MultipartDownloadTestHelpers.GenerateTestData((int)fileSize, 0); + + return new GetObjectResponse + { + BucketName = req.BucketName, + Key = req.Key, + ContentLength = fileSize, + ResponseStream = new MemoryStream(data), + ETag = "\"test-etag\"" + }; + } + finally + { + // Decrement counter when download completes + lock (concurrencyLock) + { + currentConcurrentDownloads--; + Console.WriteLine($"Download completed for {req.Key}. 
Current concurrent: {currentConcurrentDownloads}"); + } + } + }); + + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, config, useMultipartDownload: false); + + // Act + await command.ExecuteAsync(CancellationToken.None); + + // Assert + Console.WriteLine($"Test Results: Expected max concurrency ≤ {config.ConcurrentServiceRequests}, Observed: {maxObservedConcurrency}"); + Assert.AreEqual(2, config.ConcurrentServiceRequests, "Test setup verification"); + Assert.IsTrue(maxObservedConcurrency <= config.ConcurrentServiceRequests, + $"Max concurrent downloads ({maxObservedConcurrency}) should not exceed ConcurrentServiceRequests ({config.ConcurrentServiceRequests})"); + } + + /// + /// Tests that sequential mode (DownloadFilesConcurrently = false) downloads only one file at a time. + /// This test will FAIL on the current broken implementation, demonstrating that + /// sequential mode is not working correctly. + /// + /// Expected: Max 1 concurrent download (sequential mode) + /// Actual (broken): 3 concurrent downloads (all files download simultaneously despite sequential setting) + /// + [TestMethod] + public async Task ExecuteAsync_SequentialMode_DownloadsOneAtATime() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + request.DownloadFilesConcurrently = false; // Sequential mode + + var config = new TransferUtilityConfig + { + ConcurrentServiceRequests = 10 // High limit, but sequential should still be 1 + }; + + // Track concurrent downloads + var currentConcurrentDownloads = 0; + var maxObservedConcurrency = 0; + var concurrencyLock = new object(); + + var files = new Dictionary + { + { "file1.dat", 1024 }, + { "file2.dat", 1024 }, + { "file3.dat", 1024 } + }; + + // Setup directory listing + var listResponse = CreateListObjectsResponse(files); + _mockS3Client.Setup(c => c.ListObjectsAsync( + It.IsAny(), + It.IsAny())) + .ReturnsAsync(listResponse); + + // Override GetObjectAsync to track concurrency + _mockS3Client.Setup(c => c.GetObjectAsync( + It.IsAny(), + It.IsAny())) + .Returns(async (GetObjectRequest req, CancellationToken ct) => + { + lock (concurrencyLock) + { + currentConcurrentDownloads++; + maxObservedConcurrency = Math.Max(maxObservedConcurrency, currentConcurrentDownloads); + Console.WriteLine($"Sequential download started for {req.Key}. Current concurrent: {currentConcurrentDownloads}, Max observed: {maxObservedConcurrency}"); + } + + try + { + await Task.Delay(50, ct); // Brief delay + + var fileName = req.Key.Split('/').Last(); + var fileSize = files[fileName]; + var data = MultipartDownloadTestHelpers.GenerateTestData((int)fileSize, 0); + + return new GetObjectResponse + { + BucketName = req.BucketName, + Key = req.Key, + ContentLength = fileSize, + ResponseStream = new MemoryStream(data), + ETag = "\"test-etag\"" + }; + } + finally + { + lock (concurrencyLock) + { + currentConcurrentDownloads--; + Console.WriteLine($"Sequential download completed for {req.Key}. 
Current concurrent: {currentConcurrentDownloads}"); + } + } + }); + + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, config, useMultipartDownload: false); + + // Act + await command.ExecuteAsync(CancellationToken.None); + + // Assert + Console.WriteLine($"Sequential Test Results: Expected max concurrency = 1, Observed: {maxObservedConcurrency}"); + Assert.AreEqual(1, maxObservedConcurrency, + $"Sequential mode should only download 1 file at a time, but observed {maxObservedConcurrency}"); + } + + #endregion + #region Helper Methods private TransferUtilityDownloadDirectoryRequest CreateDownloadDirectoryRequest( diff --git a/sdk/test/Services/S3/UnitTests/Custom/FailurePolicyTests.cs b/sdk/test/Services/S3/UnitTests/Custom/FailurePolicyTests.cs index 1bbdce15284f..4d270c1f12ff 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/FailurePolicyTests.cs +++ b/sdk/test/Services/S3/UnitTests/Custom/FailurePolicyTests.cs @@ -2,6 +2,7 @@ using Amazon.S3.Model; using Amazon.S3.Transfer; using Amazon.S3.Transfer.Internal; +using Amazon.Runtime; using Microsoft.VisualStudio.TestTools.UnitTesting; using Moq; using System; @@ -624,5 +625,381 @@ public async Task UploadDirectory_ObjectUploadFailedEvent_ArgsContainExpectedDat try { Directory.Delete(localDir, true); } catch { } } } + + #region Path Validation Failure Tests + + [TestMethod] + [TestCategory("S3")] + public async Task DownloadDirectory_PathTraversalAttack_ContinueOnFailure_SkipsInvalidPath() + { + // Test path traversal attack with ContinueOnFailure + // Malicious S3 key attempts to write outside target directory + var keys = new[] { + "prefix/valid1.txt", + "prefix/../../etc/passwd", // Path traversal attempt + "prefix/valid2.txt" + }; + var mockS3 = CreateMockS3(keys, k => false); // All downloads would succeed if allowed + string localDir = CreateTempDirectory(); + try + { + var config = new TransferUtilityConfig(); + var request = CreateRequest(localDir, FailurePolicy.ContinueOnFailure); + var captured = new List(); + + request.ObjectDownloadFailedEvent += (sender, args) => + { + captured.Add(args); + }; + + var command = new DownloadDirectoryCommand(mockS3.Object, request, config); + command.DownloadFilesConcurrently = request.DownloadFilesConcurrently; + var response = await command.ExecuteAsync(CancellationToken.None).ConfigureAwait(false); + + // Assert: Path validation failure should be counted, valid files downloaded + Assert.IsNotNull(response); + Assert.AreEqual(2, response.ObjectsDownloaded, "Should download 2 valid files"); + Assert.AreEqual(1, response.ObjectsFailed, "Should have 1 path validation failure"); + Assert.AreEqual(DirectoryResult.PartialSuccess, response.Result); + Assert.AreEqual(1, response.Errors.Count); + + // Verify the error is an AmazonClientException (path validation error) + Assert.IsInstanceOfType(response.Errors[0], typeof(AmazonClientException)); + Assert.IsTrue(response.Errors[0].Message.Contains("not allowed outside")); + + // Verify valid files were downloaded + Assert.IsTrue(File.Exists(Path.Combine(localDir, "valid1.txt"))); + Assert.IsTrue(File.Exists(Path.Combine(localDir, "valid2.txt"))); + + // Verify ObjectDownloadFailedEvent was raised for path validation failure + Assert.AreEqual(1, captured.Count); + Assert.IsInstanceOfType(captured[0].Exception, typeof(AmazonClientException)); + } + finally + { + try { Directory.Delete(localDir, true); } catch { } + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task 
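/* with AbortOnFailure, the same traversal key is expected to surface as a thrown
   AmazonClientException ("not allowed outside"), rather than being recorded in response.Errors */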
DownloadDirectory_PathTraversalAttack_AbortOnFailure_ThrowsOnValidationFailure() + { + // Test path traversal attack with AbortOnFailure + var keys = new[] { + "prefix/file1.txt", + "prefix/../../../secrets.txt" // Path traversal attempt + }; + var mockS3 = CreateMockS3(keys, k => false); + string localDir = CreateTempDirectory(); + try + { + var tu = new TransferUtility(mockS3.Object); + var request = CreateRequest(localDir, FailurePolicy.AbortOnFailure); + + // Should throw on path validation failure + var ex = await Assert.ThrowsExceptionAsync( + () => tu.DownloadDirectoryAsync(request)); + Assert.IsTrue(ex.Message.Contains("not allowed outside")); + } + finally + { + try { Directory.Delete(localDir, true); } catch { } + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task DownloadDirectory_MixedValidationAndDownloadFailures_ContinueOnFailure_TracksAllFailures() + { + // Test mixed path validation failures + download failures + var keys = new[] { + "prefix/good.txt", // Should succeed + "prefix/../../bad-path.txt", // Path validation failure + "prefix/download-fail.txt", // Download failure + "prefix/another-good.txt" // Should succeed + }; + + var mockS3 = CreateMockS3(keys, k => k.EndsWith("download-fail.txt", StringComparison.Ordinal)); + string localDir = CreateTempDirectory(); + try + { + var config = new TransferUtilityConfig(); + var request = CreateRequest(localDir, FailurePolicy.ContinueOnFailure); + var captured = new List(); + + request.ObjectDownloadFailedEvent += (sender, args) => + { + captured.Add(args); + }; + + var command = new DownloadDirectoryCommand(mockS3.Object, request, config); + command.DownloadFilesConcurrently = request.DownloadFilesConcurrently; + var response = await command.ExecuteAsync(CancellationToken.None).ConfigureAwait(false); + + // Assert: Both failure types should be tracked + Assert.IsNotNull(response); + Assert.AreEqual(2, response.ObjectsDownloaded, "Should download 2 valid files"); + Assert.AreEqual(2, response.ObjectsFailed, "Should have 2 failures (1 validation + 1 download)"); + Assert.AreEqual(DirectoryResult.PartialSuccess, response.Result); + Assert.AreEqual(2, response.Errors.Count); + + // Verify both error types are present + var hasClientException = response.Errors.Any(e => e is AmazonClientException && e.Message.Contains("not allowed outside")); + var hasS3Exception = response.Errors.Any(e => e is AmazonS3Exception); + Assert.IsTrue(hasClientException, "Should have path validation error"); + Assert.IsTrue(hasS3Exception, "Should have download failure error"); + + // Verify events were raised for both failures + Assert.AreEqual(2, captured.Count); + } + finally + { + try { Directory.Delete(localDir, true); } catch { } + } + } + + #endregion + + #region Sequential Mode Tests + + [TestMethod] + [TestCategory("S3")] + public async Task DownloadDirectory_SequentialMode_MultipleFailures_ContinueOnFailure() + { + // Test sequential download mode with multiple failures + var keys = new[] { + "prefix/file1.txt", // Success + "prefix/file2.txt", // Failure + "prefix/file3.txt", // Success + "prefix/file4.txt", // Failure + "prefix/file5.txt" // Success + }; + var mockS3 = CreateMockS3(keys, k => k.Contains("file2") || k.Contains("file4")); + string localDir = CreateTempDirectory(); + try + { + var config = new TransferUtilityConfig(); + var request = CreateRequest(localDir, FailurePolicy.ContinueOnFailure); + request.DownloadFilesConcurrently = false; // Sequential mode + + var command = new 
DownloadDirectoryCommand(mockS3.Object, request, config); + command.DownloadFilesConcurrently = request.DownloadFilesConcurrently; + var response = await command.ExecuteAsync(CancellationToken.None).ConfigureAwait(false); + + Assert.IsNotNull(response); + Assert.AreEqual(3, response.ObjectsDownloaded, "Should download 3 files successfully"); + Assert.AreEqual(2, response.ObjectsFailed, "Should have 2 failures"); + Assert.AreEqual(DirectoryResult.PartialSuccess, response.Result); + + // Verify correct files were downloaded + Assert.IsTrue(File.Exists(Path.Combine(localDir, "file1.txt"))); + Assert.IsTrue(File.Exists(Path.Combine(localDir, "file3.txt"))); + Assert.IsTrue(File.Exists(Path.Combine(localDir, "file5.txt"))); + Assert.IsFalse(File.Exists(Path.Combine(localDir, "file2.txt"))); + Assert.IsFalse(File.Exists(Path.Combine(localDir, "file4.txt"))); + } + finally + { + try { Directory.Delete(localDir, true); } catch { } + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task DownloadDirectory_SequentialMode_FirstFileFailure_AbortOnFailure() + { + // Test AbortOnFailure in sequential mode when first file fails + var keys = new[] { + "prefix/fail-first.txt", + "prefix/should-not-download1.txt", + "prefix/should-not-download2.txt" + }; + var mockS3 = CreateMockS3(keys, k => k.Contains("fail-first")); + string localDir = CreateTempDirectory(); + try + { + var tu = new TransferUtility(mockS3.Object); + var request = CreateRequest(localDir, FailurePolicy.AbortOnFailure); + request.DownloadFilesConcurrently = false; // Sequential mode + + var ex = await Assert.ThrowsExceptionAsync( + () => tu.DownloadDirectoryAsync(request)); + Assert.IsTrue(ex.Message.Contains("fail-first")); + + // Should not have downloaded any other files + Assert.AreEqual(0, Directory.GetFiles(localDir).Length); + } + finally + { + try { Directory.Delete(localDir, true); } catch { } + } + } + + #endregion + + #region Concurrency Control Under Failure Tests + + [TestMethod] + [TestCategory("S3")] + public async Task DownloadDirectory_LimitedConcurrency_MultipleFailures_ContinueOnFailure() + { + // Test that failures are properly handled with limited concurrency + var keys = new[] { + "prefix/file1.txt", + "prefix/file2.txt", + "prefix/file3.txt", + "prefix/file4.txt", + "prefix/file5.txt", + "prefix/file6.txt" + }; + + // Make files 2, 4, and 6 fail + var mockS3 = CreateMockS3(keys, k => k.Contains("file2") || k.Contains("file4") || k.Contains("file6")); + string localDir = CreateTempDirectory(); + try + { + var config = new TransferUtilityConfig + { + ConcurrentServiceRequests = 2 // Limit to 2 concurrent downloads + }; + var request = CreateRequest(localDir, FailurePolicy.ContinueOnFailure); + + var command = new DownloadDirectoryCommand(mockS3.Object, request, config); + command.DownloadFilesConcurrently = request.DownloadFilesConcurrently; + var response = await command.ExecuteAsync(CancellationToken.None).ConfigureAwait(false); + + Assert.IsNotNull(response); + Assert.AreEqual(3, response.ObjectsDownloaded, "Should download 3 files successfully"); + Assert.AreEqual(3, response.ObjectsFailed, "Should have 3 failures"); + Assert.AreEqual(DirectoryResult.PartialSuccess, response.Result); + Assert.AreEqual(3, response.Errors.Count); + } + finally + { + try { Directory.Delete(localDir, true); } catch { } + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task DownloadDirectory_LimitedConcurrency_EarlyFailure_AbortOnFailure() + { + // Test that AbortOnFailure cancels pending tasks with limited 
concurrency + var keys = new[] { + "prefix/file1.txt", + "prefix/file2-fail.txt", // This will fail + "prefix/file3.txt", + "prefix/file4.txt", + "prefix/file5.txt" + }; + + var mockS3 = CreateMockS3(keys, k => k.Contains("file2-fail")); + string localDir = CreateTempDirectory(); + try + { + var config = new TransferUtilityConfig + { + ConcurrentServiceRequests = 2 + }; + var tu = new TransferUtility(mockS3.Object); + var request = CreateRequest(localDir, FailurePolicy.AbortOnFailure); + + var ex = await Assert.ThrowsExceptionAsync( + () => tu.DownloadDirectoryAsync(request)); + Assert.IsTrue(ex.Message.Contains("file2-fail")); + + // Some files may have downloaded before the failure, but not all + Assert.IsTrue(Directory.GetFiles(localDir).Length < keys.Length); + } + finally + { + try { Directory.Delete(localDir, true); } catch { } + } + } + + #endregion + + #region Validation Phase Failure with AbortOnFailure + + [TestMethod] + [TestCategory("S3")] + public async Task DownloadDirectory_ValidationPhaseFailure_AbortOnFailure_StopsImmediately() + { + // Test that AbortOnFailure stops on validation failure (before download phase) + var keys = new[] { + "prefix/file1.txt", + "prefix/../../../escape.txt", // Path validation will fail + "prefix/file2.txt" + }; + + var mockS3 = CreateMockS3(keys, k => false); + string localDir = CreateTempDirectory(); + try + { + var captured = new List(); + var tu = new TransferUtility(mockS3.Object); + var request = CreateRequest(localDir, FailurePolicy.AbortOnFailure); + + request.ObjectDownloadFailedEvent += (sender, args) => + { + captured.Add(args); + }; + + var ex = await Assert.ThrowsExceptionAsync( + () => tu.DownloadDirectoryAsync(request)); + Assert.IsTrue(ex.Message.Contains("not allowed outside")); + + // Verify event was raised for validation failure + Assert.AreEqual(1, captured.Count); + Assert.IsInstanceOfType(captured[0].Exception, typeof(AmazonClientException)); + } + finally + { + try { Directory.Delete(localDir, true); } catch { } + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task DownloadDirectory_MultipleValidationFailures_ContinueOnFailure_SkipsAllInvalid() + { + // Test that multiple path validation failures are all handled correctly + var keys = new[] { + "prefix/good1.txt", + "prefix/../../bad1.txt", + "prefix/good2.txt", + "prefix/../../../bad2.txt", + "prefix/good3.txt", + "prefix/../../../../bad3.txt" + }; + + var mockS3 = CreateMockS3(keys, k => false); + string localDir = CreateTempDirectory(); + try + { + var config = new TransferUtilityConfig(); + var request = CreateRequest(localDir, FailurePolicy.ContinueOnFailure); + + var command = new DownloadDirectoryCommand(mockS3.Object, request, config); + command.DownloadFilesConcurrently = request.DownloadFilesConcurrently; + var response = await command.ExecuteAsync(CancellationToken.None).ConfigureAwait(false); + + Assert.IsNotNull(response); + Assert.AreEqual(3, response.ObjectsDownloaded, "Should download 3 valid files"); + Assert.AreEqual(3, response.ObjectsFailed, "Should have 3 path validation failures"); + Assert.AreEqual(DirectoryResult.PartialSuccess, response.Result); + + // All errors should be AmazonClientException + Assert.IsTrue(response.Errors.All(e => e is AmazonClientException)); + Assert.IsTrue(response.Errors.All(e => e.Message.Contains("not allowed outside"))); + } + finally + { + try { Directory.Delete(localDir, true); } catch { } + } + } + + #endregion } } From a65b3ec88d18412781df6d1e54b6038cf08d7336 Mon Sep 17 00:00:00 2001 From: Garrett 
Beatty Date: Wed, 3 Dec 2025 11:14:55 -0500 Subject: [PATCH 37/56] Update code to acquire capacity before starting task (#4182) --- .../Internal/MultipartDownloadManager.cs | 64 +- .../Custom/MultipartDownloadManagerTests.cs | 553 ++++++++++++++++++ 2 files changed, 589 insertions(+), 28 deletions(-) diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs index b4e395ccf019..1d8abb71dcc7 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs @@ -270,31 +270,39 @@ public async Task StartDownloadsAsync(DownloadDiscoveryResult discoveryResult, E return; } - // Multipart: Start concurrent downloads for remaining parts (Part 2 onwards) - Logger.InfoFormat("MultipartDownloadManager: Starting concurrent downloads for parts 2-{0}", - discoveryResult.TotalParts); - - for (int partNum = 2; partNum <= discoveryResult.TotalParts; partNum++) - { - var task = CreateDownloadTaskAsync(partNum, discoveryResult.ObjectSize, wrappedCallback, internalCts.Token); - downloadTasks.Add(task); - } - - // Store count before WhenAllOrFirstException (which modifies the list internally) - var expectedTaskCount = downloadTasks.Count; - - Logger.DebugFormat("MultipartDownloadManager: Starting {0} download tasks in background", expectedTaskCount); - // Check if already cancelled before creating background task cancellationToken.ThrowIfCancellationRequested(); - // Start background task to wait for all downloads to complete + // Start background task to handle capacity acquisition and task creation // This allows the method to return immediately so the consumer can start reading // which prevents deadlock when MaxInMemoryParts is reached before consumer begins reading _downloadCompletionTask = Task.Run(async () => { try { + Logger.DebugFormat("MultipartDownloadManager: Background task starting capacity acquisition and downloads"); + + // Multipart: Start concurrent downloads for remaining parts (Part 2 onwards) + Logger.InfoFormat("MultipartDownloadManager: Starting concurrent downloads for parts 2-{0}", + discoveryResult.TotalParts); + + // Pre-acquire capacity in sequential order to prevent race condition deadlock + // This ensures Part 2 gets capacity before Part 3, etc., preventing out-of-order + // parts from consuming all buffer slots and blocking the next expected part + for (int partNum = 2; partNum <= discoveryResult.TotalParts; partNum++) + { + Logger.DebugFormat("MultipartDownloadManager: [Part {0}] Waiting for buffer space", partNum); + + // Acquire capacity sequentially - guarantees Part 2 before Part 3, etc. 
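+                        // The await below returns only once the data handler has granted a buffer slot,
+                        // so the download task for a given part is created strictly after every
+                        // earlier part has already acquired its slot.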
+ await _dataHandler.WaitForCapacityAsync(cancellationToken).ConfigureAwait(false); + + Logger.DebugFormat("MultipartDownloadManager: [Part {0}] Buffer space acquired", partNum); + + var task = CreateDownloadTaskAsync(partNum, discoveryResult.ObjectSize, wrappedCallback, internalCts.Token); + downloadTasks.Add(task); + } + + var expectedTaskCount = downloadTasks.Count; Logger.DebugFormat("MultipartDownloadManager: Background task waiting for {0} download tasks", expectedTaskCount); // Wait for all downloads to complete (fails fast on first exception) @@ -330,6 +338,12 @@ public async Task StartDownloadsAsync(DownloadDiscoveryResult discoveryResult, E throw; } #pragma warning restore CA1031 // Do not catch general exception types + finally + { + // Dispose the CancellationTokenSource after all background operations complete + // This ensures the token remains valid for the entire lifetime of download tasks + internalCts.Dispose(); + } }, cancellationToken); // Return immediately to allow consumer to start reading @@ -342,25 +356,19 @@ public async Task StartDownloadsAsync(DownloadDiscoveryResult discoveryResult, E Logger.Error(ex, "MultipartDownloadManager: Download failed"); _dataHandler.OnDownloadComplete(ex); - throw; - } - finally - { + + // Dispose the CancellationTokenSource if background task was never started + // This handles the case where an error occurs before Task.Run is called internalCts.Dispose(); + + throw; } } private async Task CreateDownloadTaskAsync(int partNumber, long objectSize, EventHandler progressCallback, CancellationToken cancellationToken) - { - Logger.DebugFormat("MultipartDownloadManager: [Part {0}] Waiting for buffer space", partNumber); - - // Wait for capacity before starting download - await _dataHandler.WaitForCapacityAsync(cancellationToken).ConfigureAwait(false); - - Logger.DebugFormat("MultipartDownloadManager: [Part {0}] Buffer space acquired", partNumber); - + { GetObjectResponse response = null; var ownsResponse = false; // Track if we still own the response diff --git a/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs b/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs index 3f068d85af40..c1b00afdfef3 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs +++ b/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs @@ -729,6 +729,559 @@ public async Task Validation_ContentRange_ValidRange_Succeeds() #endregion + #region Sequential Capacity Acquisition Tests + + [TestMethod] + public async Task StartDownloadsAsync_MultipartDownload_AcquiresCapacitySequentially() + { + // Arrange - Test that capacity is acquired in sequential order (Part 2 before Part 3, etc.) 
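+            // Four 8MB parts: Part 1 arrives with the discovery request, so the background
+            // loop is expected to request buffer capacity for parts 2, 3 and 4, in that order.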
+ var totalParts = 4; + var partSize = 8 * 1024 * 1024; + var totalObjectSize = totalParts * partSize; + + var capacityAcquisitionOrder = new List(); + var capacityAcquisitionLock = new object(); + + var mockDataHandler = new Mock(); + + // Track capacity acquisition order + mockDataHandler + .Setup(x => x.WaitForCapacityAsync(It.IsAny())) + .Returns(() => + { + lock (capacityAcquisitionLock) + { + // This will be called for parts 2, 3, 4 in that order + capacityAcquisitionOrder.Add(capacityAcquisitionOrder.Count + 2); + } + return Task.CompletedTask; + }); + + mockDataHandler + .Setup(x => x.ProcessPartAsync(It.IsAny(), It.IsAny(), It.IsAny())) + .Returns(Task.CompletedTask); + + mockDataHandler + .Setup(x => x.OnDownloadComplete(It.IsAny())); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( + totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: MultipartDownloadType.PART); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 2); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object); + + var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + + // Act + await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); + + // Wait for background task completion + await coordinator.DownloadCompletionTask; + + // Assert - Capacity should be acquired in sequential order: Part 2, then Part 3, then Part 4 + lock (capacityAcquisitionLock) + { + Assert.AreEqual(3, capacityAcquisitionOrder.Count, "Should acquire capacity for parts 2, 3, 4"); + Assert.AreEqual(2, capacityAcquisitionOrder[0], "First capacity acquisition should be for Part 2"); + Assert.AreEqual(3, capacityAcquisitionOrder[1], "Second capacity acquisition should be for Part 3"); + Assert.AreEqual(4, capacityAcquisitionOrder[2], "Third capacity acquisition should be for Part 4"); + } + } + + [TestMethod] + public async Task StartDownloadsAsync_MultipartDownload_DoesNotCallWaitForCapacityInCreateDownloadTask() + { + // Arrange - Test that CreateDownloadTaskAsync no longer calls WaitForCapacityAsync + var totalParts = 3; + var partSize = 8 * 1024 * 1024; + var totalObjectSize = totalParts * partSize; + + var waitForCapacityCallCount = 0; + var processPartCallCount = 0; + + var mockDataHandler = new Mock(); + + // Track WaitForCapacityAsync calls - should only be called in background task, not in CreateDownloadTaskAsync + mockDataHandler + .Setup(x => x.WaitForCapacityAsync(It.IsAny())) + .Returns(() => + { + Interlocked.Increment(ref waitForCapacityCallCount); + return Task.CompletedTask; + }); + + mockDataHandler + .Setup(x => x.ProcessPartAsync(It.IsAny(), It.IsAny(), It.IsAny())) + .Returns(() => + { + Interlocked.Increment(ref processPartCallCount); + return Task.CompletedTask; + }); + + mockDataHandler + .Setup(x => x.OnDownloadComplete(It.IsAny())); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( + totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: MultipartDownloadType.PART); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, 
mockDataHandler.Object); + + var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + + // Act + await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); + await coordinator.DownloadCompletionTask; + + // Assert + // WaitForCapacityAsync should be called exactly once per background part (parts 2 and 3) + Assert.AreEqual(2, waitForCapacityCallCount, + "WaitForCapacityAsync should be called exactly once per background part (2 times for parts 2-3)"); + + // ProcessPartAsync should be called for all parts (1, 2, 3) + Assert.AreEqual(3, processPartCallCount, + "ProcessPartAsync should be called for all parts (3 times for parts 1-3)"); + } + + [TestMethod] + public async Task StartDownloadsAsync_BackgroundTask_PreAcquiresCapacityBeforeCreatingTasks() + { + // Arrange - Test that background task pre-acquires all capacity before creating download tasks + var totalParts = 3; + var partSize = 8 * 1024 * 1024; + var totalObjectSize = totalParts * partSize; + + // Track operation order with sequential counter + var operationOrder = new List<(string operation, int partNum, int sequence)>(); + var lockObject = new object(); + var operationCounter = 0; + + var mockDataHandler = new Mock(); + + mockDataHandler + .Setup(x => x.WaitForCapacityAsync(It.IsAny())) + .Returns(() => + { + lock (lockObject) + { + var partNum = operationOrder.Count(o => o.operation == "capacity") + 2; // Parts 2, 3 + operationOrder.Add(("capacity", partNum, operationCounter++)); + } + return Task.CompletedTask; + }); + + mockDataHandler + .Setup(x => x.ProcessPartAsync(It.IsAny(), It.IsAny(), It.IsAny())) + .Returns((partNum, response, ct) => + { + lock (lockObject) + { + operationOrder.Add(("task", partNum, operationCounter++)); + } + return Task.CompletedTask; + }); + + mockDataHandler + .Setup(x => x.OnDownloadComplete(It.IsAny())); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( + totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: MultipartDownloadType.PART); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object); + + var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + + // Act + await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); + await coordinator.DownloadCompletionTask; + + // Assert + lock (lockObject) + { + var capacityOps = operationOrder.Where(o => o.operation == "capacity").ToList(); + var taskOps = operationOrder.Where(o => o.operation == "task").ToList(); + + Assert.AreEqual(2, capacityOps.Count, "Should acquire capacity for parts 2-3"); + Assert.AreEqual(3, taskOps.Count, "Should create tasks for parts 1-3"); + + // Verify all capacity acquisitions happened before any task creation + // Find the highest sequence number among capacity operations + var lastCapacitySequence = capacityOps.Max(o => o.sequence); + + // Find the lowest sequence number among task operations + var firstTaskSequence = taskOps.Min(o => o.sequence); + + // All capacity must be acquired (have lower sequence numbers) before tasks start + Assert.IsTrue(lastCapacitySequence < firstTaskSequence, + $"All capacity acquisitions must complete before task creation. 
" + + $"Last capacity sequence: {lastCapacitySequence}, First task sequence: {firstTaskSequence}. " + + $"Operations: {string.Join(", ", operationOrder.Select(o => $"{o.operation}({o.partNum})={o.sequence}"))}"); + + // Additional verification: Part 1 should be first task (processed during StartDownloadsAsync) + var part1Task = taskOps.FirstOrDefault(o => o.partNum == 1); + Assert.IsNotNull(part1Task, "Part 1 should be processed"); + Assert.IsTrue(part1Task.sequence < lastCapacitySequence, + "Part 1 should be processed before capacity acquisition for background parts"); + } + } + + #endregion + + #region Race Condition Prevention Tests + + [TestMethod] + public async Task StartDownloadsAsync_PreventRaceConditionDeadlock_WithLimitedBuffer() + { + // Arrange - Test scenario that could deadlock with old approach: limited buffer + out-of-order completion + var totalParts = 5; + var partSize = 8 * 1024 * 1024; + var totalObjectSize = totalParts * partSize; + + // Simulate a scenario where buffer is limited and parts could complete out of order + var maxInMemoryParts = 2; // Very limited buffer + var capacitySlots = new SemaphoreSlim(maxInMemoryParts); + var partProcessingOrder = new List(); + var lockObject = new object(); + + var mockDataHandler = new Mock(); + + // Simulate capacity checking - old approach could deadlock here + mockDataHandler + .Setup(x => x.WaitForCapacityAsync(It.IsAny())) + .Returns(async () => + { + // Wait for capacity (this is where the old approach could deadlock) + await capacitySlots.WaitAsync(); + // Note: In real implementation, capacity would be released when part is processed + }); + + mockDataHandler + .Setup(x => x.ProcessPartAsync(It.IsAny(), It.IsAny(), It.IsAny())) + .Returns((partNum, response, ct) => + { + lock (lockObject) + { + partProcessingOrder.Add(partNum); + } + + // Release capacity after processing + capacitySlots.Release(); + return Task.CompletedTask; + }); + + mockDataHandler + .Setup(x => x.OnDownloadComplete(It.IsAny())); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( + totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: MultipartDownloadType.PART); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 3); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object); + + var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + + // Act - This should not deadlock with the new sequential approach + var startTime = DateTime.UtcNow; + await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); + await coordinator.DownloadCompletionTask; + var endTime = DateTime.UtcNow; + + // Assert + var executionTime = (endTime - startTime).TotalSeconds; + Assert.IsTrue(executionTime < 10, + $"Download should complete without deadlock. 
Took {executionTime:F2} seconds"); + + lock (lockObject) + { + Assert.AreEqual(totalParts, partProcessingOrder.Count, + "All parts should be processed successfully"); + + // Part 1 should be first (processed during StartDownloadsAsync) + Assert.AreEqual(1, partProcessingOrder[0], "Part 1 should be processed first"); + } + } + + [TestMethod] + public async Task StartDownloadsAsync_SequentialCapacityAcquisition_PreventsOutOfOrderBlocking() + { + // Arrange - Test that sequential acquisition prevents out-of-order parts from blocking expected parts + var totalParts = 4; + var partSize = 8 * 1024 * 1024; + var totalObjectSize = totalParts * partSize; + + var capacityOrder = new List(); + var processingOrder = new List(); + var lockObject = new object(); + + var mockDataHandler = new Mock(); + + var partCounter = 1; // Start with part 2 (part 1 doesn't call WaitForCapacityAsync) + mockDataHandler + .Setup(x => x.WaitForCapacityAsync(It.IsAny())) + .Returns(() => + { + lock (lockObject) + { + partCounter++; + capacityOrder.Add(partCounter); + } + return Task.CompletedTask; + }); + + mockDataHandler + .Setup(x => x.ProcessPartAsync(It.IsAny(), It.IsAny(), It.IsAny())) + .Returns((partNum, response, ct) => + { + lock (lockObject) + { + processingOrder.Add(partNum); + } + return Task.CompletedTask; + }); + + mockDataHandler + .Setup(x => x.OnDownloadComplete(It.IsAny())); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( + totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: MultipartDownloadType.PART); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 2); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object); + + var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + + // Act + await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); + await coordinator.DownloadCompletionTask; + + // Assert - Capacity acquisition should be in order, preventing blocking + lock (lockObject) + { + Assert.AreEqual(3, capacityOrder.Count, "Should acquire capacity for parts 2, 3, 4"); + + // Verify sequential order + for (int i = 0; i < capacityOrder.Count; i++) + { + Assert.AreEqual(i + 2, capacityOrder[i], + $"Capacity acquisition {i} should be for part {i + 2}"); + } + + Assert.AreEqual(totalParts, processingOrder.Count, "All parts should be processed"); + } + } + + #endregion + + #region Background Task Resource Management Tests + + [TestMethod] + public async Task StartDownloadsAsync_BackgroundTaskSuccess_DisposesCancellationTokenSource() + { + // Arrange - Test that CancellationTokenSource is disposed after successful background operations + var totalParts = 2; + var partSize = 8 * 1024 * 1024; + var totalObjectSize = totalParts * partSize; + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( + totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: MultipartDownloadType.PART); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + var discoveryResult = await 
coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + + // Act + await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); + + // Wait for background task to complete + await coordinator.DownloadCompletionTask; + + // Assert - Background task should complete successfully + Assert.IsTrue(coordinator.DownloadCompletionTask.IsCompleted && + !coordinator.DownloadCompletionTask.IsFaulted && + !coordinator.DownloadCompletionTask.IsCanceled, + "Background task should complete successfully"); + + Assert.IsNull(coordinator.DownloadException, + "No download exception should occur"); + } + + [TestMethod] + public async Task StartDownloadsAsync_BackgroundTaskFailure_DisposesCancellationTokenSource() + { + // Arrange - Test that CancellationTokenSource is disposed even when background task fails + var totalParts = 2; + var partSize = 8 * 1024 * 1024; + var totalObjectSize = totalParts * partSize; + + var mockDataHandler = new Mock(); + + // First call (Part 1) succeeds + var callCount = 0; + mockDataHandler + .Setup(x => x.ProcessPartAsync(It.IsAny(), It.IsAny(), It.IsAny())) + .Returns((partNum, response, ct) => + { + callCount++; + if (partNum == 1) + { + return Task.CompletedTask; // Part 1 succeeds + } + throw new InvalidOperationException("Simulated download failure"); // Background parts fail + }); + + mockDataHandler + .Setup(x => x.WaitForCapacityAsync(It.IsAny())) + .Returns(Task.CompletedTask); + + mockDataHandler + .Setup(x => x.OnDownloadComplete(It.IsAny())); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( + totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: MultipartDownloadType.PART); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object); + + var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + + // Act + await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); + + // Wait for background task to complete (with failure) + try + { + await coordinator.DownloadCompletionTask; + } + catch (InvalidOperationException) + { + // Expected failure + } + + // Assert - Background task should have failed but cleanup should be done + Assert.IsTrue(coordinator.DownloadCompletionTask.IsCompleted, + "Background task should be completed (even with failure)"); + Assert.IsNotNull(coordinator.DownloadException, + "Download exception should be captured"); + Assert.IsInstanceOfType(coordinator.DownloadException, typeof(InvalidOperationException), + "Should capture the simulated failure"); + } + + [TestMethod] + public async Task StartDownloadsAsync_EarlyError_DisposesCancellationTokenSource() + { + // Arrange - Test CancellationTokenSource disposal when error occurs before background task starts + var mockDataHandler = new Mock(); + + // Simulate error during PrepareAsync (before background task is created) + mockDataHandler + .Setup(x => x.PrepareAsync(It.IsAny(), It.IsAny())) + .ThrowsAsync(new InvalidOperationException("Simulated prepare failure")); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = 
new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object); + + var discoveryResult = new DownloadDiscoveryResult + { + TotalParts = 2, + ObjectSize = 16 * 1024 * 1024, + InitialResponse = new GetObjectResponse() + }; + + // Act & Assert + try + { + await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); + Assert.Fail("Expected InvalidOperationException to be thrown"); + } + catch (InvalidOperationException ex) + { + Assert.AreEqual("Simulated prepare failure", ex.Message); + } + + // Assert - Exception should be captured and no background task should exist + Assert.IsNotNull(coordinator.DownloadException, "Download exception should be captured"); + Assert.IsTrue(coordinator.DownloadCompletionTask.IsCompleted, + "DownloadCompletionTask should return completed task when no background work exists"); + } + + [TestMethod] + public async Task StartDownloadsAsync_BackgroundTaskCancellation_HandlesTokenDisposalProperly() + { + // Arrange - Test proper token disposal when background task is cancelled + var totalParts = 3; + var partSize = 8 * 1024 * 1024; + var totalObjectSize = totalParts * partSize; + + var cts = new CancellationTokenSource(); + var mockDataHandler = new Mock(); + + // Part 1 succeeds, then cancel before background parts + mockDataHandler + .Setup(x => x.ProcessPartAsync(1, It.IsAny(), It.IsAny())) + .Returns(Task.CompletedTask); + + // Cancel when waiting for capacity (simulating cancellation during background task) + mockDataHandler + .Setup(x => x.WaitForCapacityAsync(It.IsAny())) + .Returns(() => + { + cts.Cancel(); // Cancel during background task execution + throw new OperationCanceledException(); + }); + + mockDataHandler + .Setup(x => x.OnDownloadComplete(It.IsAny())); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( + totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: MultipartDownloadType.PART); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object); + + var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + + // Act + await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); + + // Wait for background task cancellation + try + { + await coordinator.DownloadCompletionTask; + } + catch (OperationCanceledException) + { + // Expected + } + + // Assert - Cancellation should be handled properly with cleanup + Assert.IsTrue(coordinator.DownloadCompletionTask.IsCompleted, + "Background task should be completed"); + Assert.IsNotNull(coordinator.DownloadException, + "Cancellation exception should be captured"); + } + + #endregion + #region Disposal Tests [TestMethod] From 7671451c6cd625bcee4e7f41fc1e284c32adf223 Mon Sep 17 00:00:00 2001 From: Garrett Beatty Date: Wed, 3 Dec 2025 11:18:47 -0500 Subject: [PATCH 38/56] use throttling for discoverpart (#4183) --- .../Internal/MultipartDownloadManager.cs | 40 +- .../Custom/MultipartDownloadManagerTests.cs | 448 ++++++++++++++++-- 2 files changed, 456 insertions(+), 32 deletions(-) diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs index 1d8abb71dcc7..7ccab05cafe0 100644 --- 
a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs @@ -486,8 +486,24 @@ private async Task DiscoverUsingPartStrategyAsync(Cance var firstPartRequest = CreateGetObjectRequest(); firstPartRequest.PartNumber = 1; - // SEP Part GET Step 2: "send the request and wait for the response in a non-blocking fashion" - var firstPartResponse = await _s3Client.GetObjectAsync(firstPartRequest, cancellationToken).ConfigureAwait(false); + // Wait for both capacity types before making HTTP request (consistent with background parts) + Logger.DebugFormat("MultipartDownloadManager: [Part 1 Discovery] Waiting for buffer capacity"); + await _dataHandler.WaitForCapacityAsync(cancellationToken).ConfigureAwait(false); + + Logger.DebugFormat("MultipartDownloadManager: [Part 1 Discovery] Waiting for HTTP concurrency slot"); + await _httpConcurrencySlots.WaitAsync(cancellationToken).ConfigureAwait(false); + + GetObjectResponse firstPartResponse = null; + try + { + // SEP Part GET Step 2: "send the request and wait for the response in a non-blocking fashion" + firstPartResponse = await _s3Client.GetObjectAsync(firstPartRequest, cancellationToken).ConfigureAwait(false); + } + finally + { + _httpConcurrencySlots.Release(); + Logger.DebugFormat("MultipartDownloadManager: [Part 1 Discovery] HTTP concurrency slot released"); + } if (firstPartResponse == null) throw new InvalidOperationException("Failed to retrieve object from S3"); @@ -547,8 +563,24 @@ private async Task DiscoverUsingRangeStrategyAsync(Canc var firstRangeRequest = CreateGetObjectRequest(); firstRangeRequest.ByteRange = new ByteRange(0, targetPartSize - 1); - // SEP Ranged GET Step 2: "send the request and wait for the response in a non-blocking fashion" - var firstRangeResponse = await _s3Client.GetObjectAsync(firstRangeRequest, cancellationToken).ConfigureAwait(false); + // Wait for both capacity types before making HTTP request (consistent with background parts) + Logger.DebugFormat("MultipartDownloadManager: [Part 1 Discovery] Waiting for buffer capacity"); + await _dataHandler.WaitForCapacityAsync(cancellationToken).ConfigureAwait(false); + + Logger.DebugFormat("MultipartDownloadManager: [Part 1 Discovery] Waiting for HTTP concurrency slot"); + await _httpConcurrencySlots.WaitAsync(cancellationToken).ConfigureAwait(false); + + GetObjectResponse firstRangeResponse = null; + try + { + // SEP Ranged GET Step 2: "send the request and wait for the response in a non-blocking fashion" + firstRangeResponse = await _s3Client.GetObjectAsync(firstRangeRequest, cancellationToken).ConfigureAwait(false); + } + finally + { + _httpConcurrencySlots.Release(); + Logger.DebugFormat("MultipartDownloadManager: [Part 1 Discovery] HTTP concurrency slot released"); + } // Defensive null check if (firstRangeResponse == null) diff --git a/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs b/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs index c1b00afdfef3..c2d1926bd823 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs +++ b/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs @@ -734,7 +734,7 @@ public async Task Validation_ContentRange_ValidRange_Succeeds() [TestMethod] public async Task StartDownloadsAsync_MultipartDownload_AcquiresCapacitySequentially() { - // Arrange - Test that capacity is acquired in sequential order (Part 2 before Part 3, etc.) 
+ // Arrange - Test that capacity is acquired in sequential order (Part 1 discovery, then Part 2, 3, 4 background) var totalParts = 4; var partSize = 8 * 1024 * 1024; var totalObjectSize = totalParts * partSize; @@ -744,15 +744,25 @@ public async Task StartDownloadsAsync_MultipartDownload_AcquiresCapacitySequenti var mockDataHandler = new Mock(); - // Track capacity acquisition order + // Track capacity acquisition order - now includes Part 1 discovery + var callCount = 0; mockDataHandler .Setup(x => x.WaitForCapacityAsync(It.IsAny())) .Returns(() => { lock (capacityAcquisitionLock) { - // This will be called for parts 2, 3, 4 in that order - capacityAcquisitionOrder.Add(capacityAcquisitionOrder.Count + 2); + callCount++; + if (callCount == 1) + { + // First call is Part 1 discovery + capacityAcquisitionOrder.Add(1); + } + else + { + // Subsequent calls are background parts 2, 3, 4 + capacityAcquisitionOrder.Add(callCount); + } } return Task.CompletedTask; }); @@ -780,20 +790,21 @@ public async Task StartDownloadsAsync_MultipartDownload_AcquiresCapacitySequenti // Wait for background task completion await coordinator.DownloadCompletionTask; - // Assert - Capacity should be acquired in sequential order: Part 2, then Part 3, then Part 4 + // Assert - Capacity should be acquired in order: Part 1 (discovery), then Parts 2, 3, 4 (background) lock (capacityAcquisitionLock) { - Assert.AreEqual(3, capacityAcquisitionOrder.Count, "Should acquire capacity for parts 2, 3, 4"); - Assert.AreEqual(2, capacityAcquisitionOrder[0], "First capacity acquisition should be for Part 2"); - Assert.AreEqual(3, capacityAcquisitionOrder[1], "Second capacity acquisition should be for Part 3"); - Assert.AreEqual(4, capacityAcquisitionOrder[2], "Third capacity acquisition should be for Part 4"); + Assert.AreEqual(4, capacityAcquisitionOrder.Count, "Should acquire capacity for parts 1 (discovery), 2, 3, 4 (background)"); + Assert.AreEqual(1, capacityAcquisitionOrder[0], "First capacity acquisition should be for Part 1 discovery"); + Assert.AreEqual(2, capacityAcquisitionOrder[1], "Second capacity acquisition should be for Part 2 background"); + Assert.AreEqual(3, capacityAcquisitionOrder[2], "Third capacity acquisition should be for Part 3 background"); + Assert.AreEqual(4, capacityAcquisitionOrder[3], "Fourth capacity acquisition should be for Part 4 background"); } } [TestMethod] public async Task StartDownloadsAsync_MultipartDownload_DoesNotCallWaitForCapacityInCreateDownloadTask() { - // Arrange - Test that CreateDownloadTaskAsync no longer calls WaitForCapacityAsync + // Arrange - Test that CreateDownloadTaskAsync no longer calls WaitForCapacityAsync (capacity is pre-acquired) var totalParts = 3; var partSize = 8 * 1024 * 1024; var totalObjectSize = totalParts * partSize; @@ -803,7 +814,7 @@ public async Task StartDownloadsAsync_MultipartDownload_DoesNotCallWaitForCapaci var mockDataHandler = new Mock(); - // Track WaitForCapacityAsync calls - should only be called in background task, not in CreateDownloadTaskAsync + // Track WaitForCapacityAsync calls - now includes Part 1 discovery + background parts 2-3 mockDataHandler .Setup(x => x.WaitForCapacityAsync(It.IsAny())) .Returns(() => @@ -838,9 +849,9 @@ public async Task StartDownloadsAsync_MultipartDownload_DoesNotCallWaitForCapaci await coordinator.DownloadCompletionTask; // Assert - // WaitForCapacityAsync should be called exactly once per background part (parts 2 and 3) - Assert.AreEqual(2, waitForCapacityCallCount, - "WaitForCapacityAsync should be called 
exactly once per background part (2 times for parts 2-3)");
+            // WaitForCapacityAsync should be called for Part 1 discovery + background parts 2-3 (total 3 calls)
+            Assert.AreEqual(3, waitForCapacityCallCount,
+                "WaitForCapacityAsync should be called for Part 1 discovery + background parts 2-3 (3 times total)");
 
             // ProcessPartAsync should be called for all parts (1, 2, 3)
             Assert.AreEqual(3, processPartCallCount,
@@ -908,7 +919,7 @@ public async Task StartDownloadsAsync_BackgroundTask_PreAcquiresCapacityBeforeCr
                 var capacityOps = operationOrder.Where(o => o.operation == "capacity").ToList();
                 var taskOps = operationOrder.Where(o => o.operation == "task").ToList();
 
-                Assert.AreEqual(2, capacityOps.Count, "Should acquire capacity for parts 2-3");
+                Assert.AreEqual(3, capacityOps.Count, "Should acquire capacity for Part 1 discovery and for parts 2-3");
                 Assert.AreEqual(3, taskOps.Count, "Should create tasks for parts 1-3");
 
                 // Verify all capacity acquisitions happened before any task creation
@@ -1024,7 +1035,7 @@ public async Task StartDownloadsAsync_SequentialCapacityAcquisition_PreventsOutO
 
             var mockDataHandler = new Mock();
 
-            var partCounter = 1; // Start with part 2 (part 1 doesn't call WaitForCapacityAsync)
+            var partCounter = 0; // Start with part 1 (Part 1 discovery now calls WaitForCapacityAsync)
             mockDataHandler
                 .Setup(x => x.WaitForCapacityAsync(It.IsAny()))
                 .Returns(() =>
@@ -1068,13 +1079,13 @@ public async Task StartDownloadsAsync_SequentialCapacityAcquisition_PreventsOutO
             // Assert - Capacity acquisition should be in order, preventing blocking
             lock (lockObject)
             {
-                Assert.AreEqual(3, capacityOrder.Count, "Should acquire capacity for parts 2, 3, 4");
+                Assert.AreEqual(4, capacityOrder.Count, "Should acquire capacity for Part 1 discovery + parts 2, 3, 4 (background)");
 
-                // Verify sequential order
+                // Verify sequential order: Part 1 (discovery), then Parts 2, 3, 4 (background)
                 for (int i = 0; i < capacityOrder.Count; i++)
                 {
-                    Assert.AreEqual(i + 2, capacityOrder[i],
-                        $"Capacity acquisition {i} should be for part {i + 2}");
+                    Assert.AreEqual(i + 1, capacityOrder[i],
+                        $"Capacity acquisition {i} should be for part {i + 1}");
                 }
 
                 Assert.AreEqual(totalParts, processingOrder.Count, "All parts should be processed");
@@ -1233,20 +1244,31 @@ public async Task StartDownloadsAsync_BackgroundTaskCancellation_HandlesTokenDis
             var cts = new CancellationTokenSource();
             var mockDataHandler = new Mock();
 
-            // Part 1 succeeds, then cancel before background parts
-            mockDataHandler
-                .Setup(x => x.ProcessPartAsync(1, It.IsAny(), It.IsAny()))
-                .Returns(Task.CompletedTask);
-
-            // Cancel when waiting for capacity (simulating cancellation during background task)
+            // Part 1 discovery succeeds (now also calls WaitForCapacityAsync)
+            var callCount = 0;
             mockDataHandler
                 .Setup(x => x.WaitForCapacityAsync(It.IsAny()))
                 .Returns(() =>
                 {
-                    cts.Cancel(); // Cancel during background task execution
-                    throw new OperationCanceledException();
+                    callCount++;
+                    if (callCount == 1)
+                    {
+                        // First call (Part 1 discovery) succeeds
+                        return Task.CompletedTask;
+                    }
+                    else
+                    {
+                        // Second call (background task) cancels
+                        cts.Cancel(); // Cancel during background task execution
+                        throw new OperationCanceledException();
+                    }
                 });
 
+            // Part 1 processing succeeds
+            mockDataHandler
+                .Setup(x => x.ProcessPartAsync(1, It.IsAny(), It.IsAny()))
+                .Returns(Task.CompletedTask);
+
             mockDataHandler
                 .Setup(x => x.OnDownloadComplete(It.IsAny()));
@@ -1807,6 +1829,376 @@ public async Task StartDownloadsAsync_SinglePart_ReturnsImmediatelyWithoutBackgr
#endregion + #region Capacity Checking Tests + + [TestMethod] + public async Task DiscoverUsingPartStrategy_CallsWaitForCapacityAsync() + { + // Arrange + var capacityCallCount = 0; + var mockDataHandler = new Mock(); + + // Track WaitForCapacityAsync calls + mockDataHandler + .Setup(x => x.WaitForCapacityAsync(It.IsAny())) + .Returns(() => + { + Interlocked.Increment(ref capacityCallCount); + return Task.CompletedTask; + }); + + mockDataHandler + .Setup(x => x.ProcessPartAsync(It.IsAny(), It.IsAny(), It.IsAny())) + .Returns(Task.CompletedTask); + + var mockResponse = MultipartDownloadTestHelpers.CreateMultipartFirstPartResponse( + 8 * 1024 * 1024, 3, 24 * 1024 * 1024, "test-etag"); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: MultipartDownloadType.PART); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object); + + // Act + var result = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + + // Assert + Assert.AreEqual(1, capacityCallCount, "WaitForCapacityAsync should be called exactly once during Part 1 discovery"); + Assert.IsNotNull(result); + Assert.AreEqual(3, result.TotalParts); + + // Verify the mock was called with correct setup + mockDataHandler.Verify(x => x.WaitForCapacityAsync(It.IsAny()), Times.Once); + } + + [TestMethod] + public async Task DiscoverUsingRangeStrategy_CallsWaitForCapacityAsync() + { + // Arrange + var capacityCallCount = 0; + var mockDataHandler = new Mock(); + + // Track WaitForCapacityAsync calls + mockDataHandler + .Setup(x => x.WaitForCapacityAsync(It.IsAny())) + .Returns(() => + { + Interlocked.Increment(ref capacityCallCount); + return Task.CompletedTask; + }); + + mockDataHandler + .Setup(x => x.ProcessPartAsync(It.IsAny(), It.IsAny(), It.IsAny())) + .Returns(Task.CompletedTask); + + var totalObjectSize = 52428800; // 50MB + var partSize = 8388608; // 8MB + var mockResponse = MultipartDownloadTestHelpers.CreateRangeResponse( + 0, partSize - 1, totalObjectSize, "test-etag"); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + partSize: partSize, + downloadType: MultipartDownloadType.RANGE); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object); + + // Act + var result = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + + // Assert + Assert.AreEqual(1, capacityCallCount, "WaitForCapacityAsync should be called exactly once during Part 1 discovery"); + Assert.IsNotNull(result); + Assert.AreEqual(7, result.TotalParts); // 52428800 / 8388608 = 6.25 -> 7 parts + + // Verify the mock was called with correct setup + mockDataHandler.Verify(x => x.WaitForCapacityAsync(It.IsAny()), Times.Once); + } + + [TestMethod] + public async Task DiscoverUsingPartStrategy_AcquiresAndReleasesHttpSlot() + { + // Arrange - Use real SemaphoreSlim to track HTTP concurrency usage + var httpThrottler = new SemaphoreSlim(2, 2); // 2 concurrent requests max + var initialCount = httpThrottler.CurrentCount; + + var mockDataHandler = CreateMockDataHandler(); + var mockResponse = 
MultipartDownloadTestHelpers.CreateMultipartFirstPartResponse( + 8 * 1024 * 1024, 3, 24 * 1024 * 1024, "test-etag"); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: MultipartDownloadType.PART); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + + // Use shared HTTP throttler to track usage + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object, null, httpThrottler); + + // Act + var result = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(result); + Assert.AreEqual(initialCount, httpThrottler.CurrentCount, + "HTTP concurrency slot should be released after discovery completes"); + + // Cleanup + httpThrottler.Dispose(); + } + + [TestMethod] + public async Task DiscoverUsingRangeStrategy_AcquiresAndReleasesHttpSlot() + { + // Arrange - Use real SemaphoreSlim to track HTTP concurrency usage + var httpThrottler = new SemaphoreSlim(2, 2); // 2 concurrent requests max + var initialCount = httpThrottler.CurrentCount; + + var mockDataHandler = CreateMockDataHandler(); + var totalObjectSize = 52428800; // 50MB + var partSize = 8388608; // 8MB + var mockResponse = MultipartDownloadTestHelpers.CreateRangeResponse( + 0, partSize - 1, totalObjectSize, "test-etag"); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + partSize: partSize, + downloadType: MultipartDownloadType.RANGE); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + + // Use shared HTTP throttler to track usage + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object, null, httpThrottler); + + // Act + var result = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(result); + Assert.AreEqual(initialCount, httpThrottler.CurrentCount, + "HTTP concurrency slot should be released after discovery completes"); + + // Cleanup + httpThrottler.Dispose(); + } + + [TestMethod] + public async Task MultipleDownloads_WithSharedHttpThrottler_RespectsLimits() + { + // Arrange - Simulate directory download scenario with shared throttler + var sharedThrottler = new SemaphoreSlim(1, 1); // Very limited: 1 concurrent request + var mockDataHandler1 = CreateMockDataHandler(); + var mockDataHandler2 = CreateMockDataHandler(); + + // Create two download managers sharing the same HTTP throttler + var mockResponse1 = MultipartDownloadTestHelpers.CreateSinglePartResponse(1024, "file1-etag"); + var mockResponse2 = MultipartDownloadTestHelpers.CreateSinglePartResponse(2048, "file2-etag"); + + var mockClient1 = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse1)); + var mockClient2 = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse2)); + + var request1 = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var request2 = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + + var coordinator1 = new MultipartDownloadManager(mockClient1.Object, request1, config, mockDataHandler1.Object, null, sharedThrottler); + var 
coordinator2 = new MultipartDownloadManager(mockClient2.Object, request2, config, mockDataHandler2.Object, null, sharedThrottler); + + // Act - Start both discoveries concurrently + var task1 = coordinator1.DiscoverDownloadStrategyAsync(CancellationToken.None); + var task2 = coordinator2.DiscoverDownloadStrategyAsync(CancellationToken.None); + + await Task.WhenAll(task1, task2); + + // Assert - Both should complete successfully despite shared throttler limits + Assert.IsNotNull(task1.Result); + Assert.IsNotNull(task2.Result); + Assert.AreEqual(1, sharedThrottler.CurrentCount, "HTTP throttler should be fully released"); + + // Cleanup + coordinator1.Dispose(); + coordinator2.Dispose(); + sharedThrottler.Dispose(); + } + + [TestMethod] + public async Task Discovery_HttpRequestFails_ReleasesCapacityProperly() + { + // Arrange - Simulate HTTP request failure + var httpThrottler = new SemaphoreSlim(2, 2); + var initialCount = httpThrottler.CurrentCount; + + var mockDataHandler = CreateMockDataHandler(); + var mockClient = new Mock(); + + // HTTP request throws exception + mockClient + .Setup(x => x.GetObjectAsync(It.IsAny(), It.IsAny())) + .ThrowsAsync(new InvalidOperationException("Simulated S3 failure")); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object, null, httpThrottler); + + // Act & Assert + try + { + await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + Assert.Fail("Expected InvalidOperationException to be thrown"); + } + catch (InvalidOperationException ex) + { + Assert.AreEqual("Simulated S3 failure", ex.Message); + } + + // Assert - HTTP concurrency should be properly released even after failure + Assert.AreEqual(initialCount, httpThrottler.CurrentCount, + "HTTP concurrency slot should be released even when HTTP request fails"); + + // Cleanup + httpThrottler.Dispose(); + } + + [TestMethod] + public async Task Discovery_CancellationDuringCapacityWait_ReleasesHttpSlotProperly() + { + // Arrange - Test cancellation during capacity acquisition + var httpThrottler = new SemaphoreSlim(2, 2); + var initialCount = httpThrottler.CurrentCount; + + var cts = new CancellationTokenSource(); + var mockDataHandler = new Mock(); + + // Cancel during capacity wait + mockDataHandler + .Setup(x => x.WaitForCapacityAsync(It.IsAny())) + .Returns(() => + { + cts.Cancel(); + throw new OperationCanceledException(); + }); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object, null, httpThrottler); + + // Act & Assert + try + { + await coordinator.DiscoverDownloadStrategyAsync(cts.Token); + Assert.Fail("Expected OperationCanceledException to be thrown"); + } + catch (OperationCanceledException) + { + // Expected + } + + // Assert - HTTP slot should still be available (never acquired due to early cancellation) + Assert.AreEqual(initialCount, httpThrottler.CurrentCount, + "HTTP concurrency slot should remain available when cancelled before HTTP request"); + + // Cleanup + httpThrottler.Dispose(); + } + + [TestMethod] + public async Task 
Discovery_CancellationAfterCapacityButBeforeHttp_ReleasesHttpSlotProperly() + { + // Arrange - Test cancellation after capacity but before HTTP call + var httpThrottler = new SemaphoreSlim(2, 2); + var initialCount = httpThrottler.CurrentCount; + + var cts = new CancellationTokenSource(); + var mockDataHandler = new Mock<IPartDataHandler>(); + + // Capacity acquisition succeeds + mockDataHandler + .Setup(x => x.WaitForCapacityAsync(It.IsAny<CancellationToken>())) + .Returns(Task.CompletedTask); + + // HTTP call gets cancelled + var mockClient = new Mock<IAmazonS3>(); + mockClient + .Setup(x => x.GetObjectAsync(It.IsAny<GetObjectRequest>(), It.IsAny<CancellationToken>())) + .Returns(() => + { + cts.Token.ThrowIfCancellationRequested(); + throw new OperationCanceledException(); + }); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object, null, httpThrottler); + + // Act & Assert + try + { + cts.Cancel(); // Cancel before discovery + await coordinator.DiscoverDownloadStrategyAsync(cts.Token); + Assert.Fail("Expected OperationCanceledException to be thrown"); + } + catch (OperationCanceledException) + { + // Expected + } + + // Assert - HTTP slot should be properly released by finally block + Assert.AreEqual(initialCount, httpThrottler.CurrentCount, + "HTTP concurrency slot should be released by finally block on cancellation"); + + // Cleanup + httpThrottler.Dispose(); + } + + [TestMethod] + public async Task Discovery_SinglePart_StillCallsCapacityCheck() + { + // Arrange - Even single-part downloads should check capacity during discovery + var capacityCallCount = 0; + var mockDataHandler = new Mock<IPartDataHandler>(); + + mockDataHandler + .Setup(x => x.WaitForCapacityAsync(It.IsAny<CancellationToken>())) + .Returns(() => + { + Interlocked.Increment(ref capacityCallCount); + return Task.CompletedTask; + }); + + mockDataHandler + .Setup(x => x.ProcessPartAsync(It.IsAny<int>(), It.IsAny<GetObjectResponse>(), It.IsAny<CancellationToken>())) + .Returns(Task.CompletedTask); + + var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse(1024); + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object); + + // Act + var result = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(result); + Assert.AreEqual(1, result.TotalParts); + Assert.AreEqual(1, capacityCallCount, + "Even single-part downloads should call WaitForCapacityAsync during discovery"); + } + + #endregion + #region ContentRange and Part Range Calculation Tests [TestMethod] From b86e148bd0366d787d47725a35479f0d3fff73aa Mon Sep 17 00:00:00 2001 From: Garrett Beatty Date: Wed, 3 Dec 2025 11:48:53 -0500 Subject: [PATCH 39/56] refactor upload directory and fix concurrency bug (#4186) --- .../UploadDirectoryCommand.cs | 325 +++++++++++------- 1 file changed, 202 insertions(+), 123 deletions(-) diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/UploadDirectoryCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/UploadDirectoryCommand.cs index e56c811fbb36..9baf333eafcf 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/UploadDirectoryCommand.cs
+++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/UploadDirectoryCommand.cs @@ -31,154 +31,233 @@ internal partial class UploadDirectoryCommand : BaseCommand ExecuteAsync(CancellationToken cancellationToken) { + // Step 1: Setup paths and discover files string prefix = GetKeyPrefix(); string basePath = new DirectoryInfo(this._request.Directory).FullName; - _logger.DebugFormat("UploadDirectoryCommand starting. BasePath={0}, Prefix={1}, UploadFilesConcurrently={2}, ConcurrentServiceRequests={3}", + _logger.DebugFormat("UploadDirectoryCommand.ExecuteAsync: Starting - BasePath={0}, Prefix={1}, UploadFilesConcurrently={2}, ConcurrentServiceRequests={3}", basePath, prefix, UploadFilesConcurrently, this._config.ConcurrentServiceRequests); - string[] filePaths = await GetFiles(basePath, this._request.SearchPattern, this._request.SearchOption, cancellationToken) - .ConfigureAwait(continueOnCapturedContext: false); + // Step 2: Discover files to upload + string[] filePaths = await DiscoverFilesAsync(basePath, cancellationToken) + .ConfigureAwait(false); + this._totalNumberOfFiles = filePaths.Length; + _logger.DebugFormat("UploadDirectoryCommand.ExecuteAsync: Discovered {0} file(s) to upload. TotalBytes={1}", + _totalNumberOfFiles, _totalBytes); - _logger.DebugFormat("Discovered {0} file(s) to upload. TotalBytes={1}", _totalNumberOfFiles, _totalBytes); - - // Two-level throttling architecture: - // 1. File-level throttler: Controls how many files are uploaded concurrently - // 2. HTTP-level throttler: Controls total HTTP requests across ALL file uploads - // - // Example with ConcurrentServiceRequests = 10: - // - fileOperationThrottler = 10: Up to 10 files can upload simultaneously - // - sharedHttpRequestThrottler = 10: All 10 files share 10 total HTTP request slots - // - Without HTTP throttler: Would result in 10 files × 10 parts = 100 concurrent HTTP requests - // - With HTTP throttler: Enforces 10 total concurrent HTTP requests across all files - // - // This prevents resource exhaustion when uploading many large files with multipart uploads. - SemaphoreSlim sharedHttpRequestThrottler = null; - SemaphoreSlim fileOperationThrottler = null; - CancellationTokenSource internalCts = null; - try + // Step 3: Setup resources and execute uploads + using (var resources = CreateUploadResources(cancellationToken)) { - var pendingTasks = new List(); - - // File-level throttler: Controls concurrent file operations - fileOperationThrottler = UploadFilesConcurrently ? - new SemaphoreSlim(this._config.ConcurrentServiceRequests) : - new SemaphoreSlim(1); - _logger.DebugFormat("Created fileOperationThrottler with initial count={0}", UploadFilesConcurrently ? this._config.ConcurrentServiceRequests : 1); - - // HTTP-level throttler: Shared across all uploads to control total HTTP concurrency - sharedHttpRequestThrottler = this._utility.S3Client is Amazon.S3.Internal.IAmazonS3Encryption ? - // If we are using AmazonS3EncryptionClient, don't set the HTTP throttler. - // The fileOperationThrottler will be used to control how many files are uploaded in parallel. - // Each upload (multipart) will upload parts serially. - null : - // Use a throttler which will be shared between simple and multipart uploads - // to control total concurrent HTTP requests across all file operations. - new SemaphoreSlim(this._config.ConcurrentServiceRequests); - if (sharedHttpRequestThrottler == null) - { - _logger.Debug(null, "sharedHttpRequestThrottler disabled due to encryption client. 
Multipart uploads will be serial per file."); - } - else - { - _logger.DebugFormat("Created sharedHttpRequestThrottler with initial count={0}", this._config.ConcurrentServiceRequests); - } + await ExecuteParallelUploadsAsync( + filePaths, + basePath, + prefix, + resources, + cancellationToken) + .ConfigureAwait(false); + } + + // Step 4: Build and return response + _logger.DebugFormat("UploadDirectoryCommand.ExecuteAsync: Completed - FilesSuccessfullyUploaded={0}, FilesFailed={1}", + _numberOfFilesSuccessfullyUploaded, _errors.Count); + + return BuildResponse(); + } + + /// + /// Encapsulates disposable resources used during directory upload. + /// + private sealed class UploadResources : IDisposable + { + public SemaphoreSlim HttpRequestThrottler { get; } + public CancellationTokenSource InternalCancellationTokenSource { get; } + + public UploadResources( + SemaphoreSlim httpRequestThrottler, + CancellationTokenSource cancellationTokenSource) + { + HttpRequestThrottler = httpRequestThrottler; + InternalCancellationTokenSource = cancellationTokenSource; + } - internalCts = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken); + public void Dispose() + { + InternalCancellationTokenSource?.Dispose(); + HttpRequestThrottler?.Dispose(); + } + } - foreach (string filepath in filePaths) + /// + /// Discovers files to upload from the local directory and calculates total bytes. + /// + private async Task DiscoverFilesAsync(string basePath, CancellationToken cancellationToken) + { + return await Task.Run(() => + { + var filePaths = Directory.GetFiles( + basePath, + this._request.SearchPattern, + this._request.SearchOption); + + foreach (var filePath in filePaths) { - _logger.DebugFormat("Waiting for fileOperationThrottler to schedule file."); - await fileOperationThrottler.WaitAsync(cancellationToken).ConfigureAwait(continueOnCapturedContext: false); - _logger.DebugFormat("Acquired fileOperationThrottler. Currently scheduled: {0}", pendingTasks.Count + 1); - - try - { - cancellationToken.ThrowIfCancellationRequested(); - if (internalCts.IsCancellationRequested) - { - // Operation cancelled as one of the upload requests failed with an exception, - // don't schedule any more upload tasks. - // Don't throw an OperationCanceledException here as we want to process the - // responses and throw the original exception. - _logger.Debug(null, "Internal cancellation requested; breaking out of scheduling loop."); - break; - } - - var uploadRequest = ConstructRequest(basePath, filepath, prefix); - - Action onFailure = (ex) => - { - this._request.OnRaiseObjectUploadFailedEvent( - new ObjectUploadFailedEventArgs( - this._request, - uploadRequest, - ex)); - }; - - var task = _failurePolicy.ExecuteAsync( - async () => { - _logger.DebugFormat("Starting upload command"); - var command = _utility.GetUploadCommand(uploadRequest, sharedHttpRequestThrottler); - await command.ExecuteAsync(internalCts.Token) - .ConfigureAwait(false); - var uploaded = Interlocked.Increment(ref _numberOfFilesSuccessfullyUploaded); - _logger.DebugFormat("Completed upload. FilesSuccessfullyUploaded={0}", uploaded); - }, - onFailure, - internalCts - ); - - pendingTasks.Add(task); - _logger.DebugFormat("Scheduled upload task. 
PendingTasks={0}", pendingTasks.Count); - } - finally - { - fileOperationThrottler.Release(); - } + _totalBytes += new FileInfo(filePath).Length; } - _logger.DebugFormat("Awaiting completion of {0} scheduled task(s)", pendingTasks.Count); - await TaskHelpers.WhenAllOrFirstExceptionAsync(pendingTasks, cancellationToken) - .ConfigureAwait(continueOnCapturedContext: false); + return filePaths; + }, cancellationToken).ConfigureAwait(false); + } + + /// + /// Creates resources needed for parallel uploads with proper throttling. + /// + /// Throttling architecture: + /// - Task pool pattern (ForEachWithConcurrencyAsync): Controls concurrent file uploads + /// - HttpRequestThrottler: Controls total HTTP requests across ALL file uploads + /// + /// Example with ConcurrentServiceRequests = 10: + /// - Task pool creates max 10 concurrent file upload tasks + /// - HttpRequestThrottler = 10: All files share 10 total HTTP request slots + /// - Without HTTP throttler: 10 multipart files × 10 parts = 100 concurrent HTTP requests + /// - With HTTP throttler: Enforces 10 total concurrent HTTP requests across all files + /// + /// Special case: When using AmazonS3EncryptionClient, HTTP throttler is disabled. + /// The task pool concurrency control is sufficient since encryption uploads are serial per file. + /// + private UploadResources CreateUploadResources(CancellationToken cancellationToken) + { + SemaphoreSlim httpRequestThrottler = null; + + // HTTP-level throttler: Shared across all uploads to control total HTTP concurrency + // Disabled for encryption client since each upload processes parts serially + if (this._utility.S3Client is Amazon.S3.Internal.IAmazonS3Encryption) + { + _logger.DebugFormat("UploadDirectoryCommand.CreateUploadResources: HTTP throttler disabled for encryption client. Multipart uploads will be serial per file."); } - finally + else { - internalCts.Dispose(); - fileOperationThrottler.Dispose(); - sharedHttpRequestThrottler?.Dispose(); - _logger.DebugFormat("UploadDirectoryCommand finished. FilesSuccessfullyUploaded={0}", _numberOfFilesSuccessfullyUploaded); + httpRequestThrottler = new SemaphoreSlim(this._config.ConcurrentServiceRequests); + _logger.DebugFormat("UploadDirectoryCommand.CreateUploadResources: Created HTTP throttler with MaxConcurrentRequests={0}", + this._config.ConcurrentServiceRequests); } + var internalCts = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken); + + return new UploadResources(httpRequestThrottler, internalCts); + } + + /// + /// Executes parallel uploads of all files using task pool pattern. + /// Only creates as many tasks as the concurrency limit allows (not all files up front). + /// + private async Task ExecuteParallelUploadsAsync( + string[] filePaths, + string basePath, + string prefix, + UploadResources resources, + CancellationToken cancellationToken) + { + int concurrencyLevel = UploadFilesConcurrently + ?
this._config.ConcurrentServiceRequests + : 1; + + _logger.DebugFormat("UploadDirectoryCommand.ExecuteParallelUploadsAsync: Starting task pool with ConcurrencyLevel={0}, TotalFiles={1}", + concurrencyLevel, filePaths.Length); + + await TaskHelpers.ForEachWithConcurrencyAsync( + filePaths, + concurrencyLevel, + async (filepath, ct) => + { + ct.ThrowIfCancellationRequested(); + + await UploadSingleFileAsync( + filepath, + basePath, + prefix, + resources.HttpRequestThrottler, + resources.InternalCancellationTokenSource) + .ConfigureAwait(false); + }, + cancellationToken) + .ConfigureAwait(false); + + _logger.DebugFormat("UploadDirectoryCommand.ExecuteParallelUploadsAsync: Task pool completed - FilesSuccessfullyUploaded={0}, FilesFailed={1}", + _numberOfFilesSuccessfullyUploaded, _errors.Count); + } + + /// + /// Uploads a single file to S3 with failure handling. + /// + private async Task UploadSingleFileAsync( + string filepath, + string basePath, + string prefix, + SemaphoreSlim httpRequestThrottler, + CancellationTokenSource internalCts) + { + if (internalCts.IsCancellationRequested) + return; + + var uploadRequest = ConstructRequest(basePath, filepath, prefix); + + // Create failure callback + Action onFailure = (ex) => + { + this._request.OnRaiseObjectUploadFailedEvent( + new ObjectUploadFailedEventArgs( + this._request, + uploadRequest, + ex)); + }; + + // Execute upload with failure policy + await _failurePolicy.ExecuteAsync( + () => ExecuteUploadCommandAsync(uploadRequest, httpRequestThrottler, internalCts.Token), + onFailure, + internalCts + ).ConfigureAwait(false); + } + + /// + /// Creates and executes the appropriate upload command for the file. + /// + private async Task ExecuteUploadCommandAsync( + TransferUtilityUploadRequest uploadRequest, + SemaphoreSlim httpRequestThrottler, + CancellationToken cancellationToken) + { + _logger.DebugFormat("UploadDirectoryCommand.ExecuteUploadCommandAsync: Starting upload command"); + + var command = _utility.GetUploadCommand(uploadRequest, httpRequestThrottler); + await command.ExecuteAsync(cancellationToken).ConfigureAwait(false); + + var uploaded = Interlocked.Increment(ref _numberOfFilesSuccessfullyUploaded); + _logger.DebugFormat("UploadDirectoryCommand.ExecuteUploadCommandAsync: Completed upload. FilesSuccessfullyUploaded={0}", uploaded); + } + + /// + /// Builds the response object based on upload results. + /// + private TransferUtilityUploadDirectoryResponse BuildResponse() + { var response = new TransferUtilityUploadDirectoryResponse { ObjectsUploaded = _numberOfFilesSuccessfullyUploaded, ObjectsFailed = _errors.Count, Errors = _errors.ToList(), - Result = _errors.Count == 0 ? - DirectoryResult.Success : - (_numberOfFilesSuccessfullyUploaded > 0 ? - DirectoryResult.PartialSuccess : - DirectoryResult.Failure) + Result = _errors.Count == 0 + ? DirectoryResult.Success + : (_numberOfFilesSuccessfullyUploaded > 0 + ? 
DirectoryResult.PartialSuccess + : DirectoryResult.Failure) }; - _logger.DebugFormat("Response summary: Uploaded={0}, Failed={1}, Result={2}", response.ObjectsUploaded, response.ObjectsFailed, response.Result); - return response; - } - - private Task GetFiles(string path, string searchPattern, SearchOption searchOption, CancellationToken cancellationToken) - { - return Task.Run(() => - { - var filePaths = Directory.GetFiles(path, searchPattern, searchOption); - foreach (var filePath in filePaths) - { - _totalBytes += new FileInfo(filePath).Length; - } - return filePaths; - }, cancellationToken); + _logger.DebugFormat("UploadDirectoryCommand.BuildResponse: Uploaded={0}, Failed={1}, Result={2}", + response.ObjectsUploaded, response.ObjectsFailed, response.Result); + return response; } } } From 3f681f7a938e4ad86ccd7741522112d4cf8df1b7 Mon Sep 17 00:00:00 2001 From: Garrett Beatty Date: Wed, 3 Dec 2025 12:44:57 -0500 Subject: [PATCH 40/56] DownloadDirectory Initiated, Failed and Completed Events (#4176) --- .../7f23582e-3225-487b-83e7-167cf17cb234.json | 11 + .../Internal/DownloadDirectoryCommand.cs | 33 ++ .../Internal/_async/DownloadCommand.async.cs | 6 +- .../_async/SimpleUploadCommand.async.cs | 4 +- .../DownloadDirectoryCommand.cs | 66 ++-- ...TransferUtilityDownloadDirectoryRequest.cs | 222 +++++++++++- ...rUtilityDownloadDirectoryLifecycleTests.cs | 320 ++++++++++++++++++ 7 files changed, 629 insertions(+), 33 deletions(-) create mode 100644 generator/.DevConfigs/7f23582e-3225-487b-83e7-167cf17cb234.json create mode 100644 sdk/test/Services/S3/IntegrationTests/TransferUtilityDownloadDirectoryLifecycleTests.cs diff --git a/generator/.DevConfigs/7f23582e-3225-487b-83e7-167cf17cb234.json b/generator/.DevConfigs/7f23582e-3225-487b-83e7-167cf17cb234.json new file mode 100644 index 000000000000..179605fd4aab --- /dev/null +++ b/generator/.DevConfigs/7f23582e-3225-487b-83e7-167cf17cb234.json @@ -0,0 +1,11 @@ +{ + "services": [ + { + "serviceName": "S3", + "type": "minor", + "changeLogMessages": [ + "Added DownloadDirectoryInitiatedEvent, DownloadDirectoryCompletedEvent, and DownloadDirectoryFailedEvent for Amazon.S3.Transfer.TransferUtility.DownloadDirectory." 
+ ] + } + ] +} \ No newline at end of file diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadDirectoryCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadDirectoryCommand.cs index 77f868a520fa..4c897a5e9f1b 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadDirectoryCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadDirectoryCommand.cs @@ -48,6 +48,39 @@ internal partial class DownloadDirectoryCommand : BaseCommand ExecuteAsync(CancellationToken cancellationToken) { - ValidateRequest(); - FireTransferInitiatedEvent(); - + + ValidateRequest(); + GetObjectRequest getRequest = ConvertToGetObjectRequest(this._request); var maxRetries = _s3Client.Config.MaxErrorRetry; diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/SimpleUploadCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/SimpleUploadCommand.async.cs index 1d936f0bdf5c..3b350c1bd877 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/SimpleUploadCommand.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/SimpleUploadCommand.async.cs @@ -32,14 +32,14 @@ public override async Task ExecuteAsync(Cancellat { try { + FireTransferInitiatedEvent(); + if (AsyncThrottler != null) { await this.AsyncThrottler.WaitAsync(cancellationToken) .ConfigureAwait(continueOnCapturedContext: false); } - FireTransferInitiatedEvent(); - var putRequest = ConstructRequest(); var response = await _s3Client.PutObjectAsync(putRequest, cancellationToken) .ConfigureAwait(continueOnCapturedContext: false); diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs index f9a44ec2b83a..3e426198ff0a 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs @@ -48,40 +48,52 @@ internal DownloadDirectoryCommand(IAmazonS3 s3Client, TransferUtilityDownloadDir public override async Task ExecuteAsync(CancellationToken cancellationToken) { - Logger.DebugFormat("DownloadDirectoryCommand.ExecuteAsync: Starting - DownloadFilesConcurrently={0}, UseMultipartDownload={1}, ConcurrentServiceRequests={2}", - DownloadFilesConcurrently, this._useMultipartDownload, this._config.ConcurrentServiceRequests); + try + { + FireTransferInitiatedEvent(); - // Step 1: Validate and setup - ValidateRequest(); - EnsureDirectoryExists(new DirectoryInfo(this._request.LocalDirectory)); + Logger.DebugFormat("DownloadDirectoryCommand.ExecuteAsync: Starting - DownloadFilesConcurrently={0}, UseMultipartDownload={1}, ConcurrentServiceRequests={2}", + DownloadFilesConcurrently, this._useMultipartDownload, this._config.ConcurrentServiceRequests); - // Step 2: List S3 objects - var (s3Objects, prefixLength) = await ListS3ObjectsAsync(cancellationToken) - .ConfigureAwait(false); + // Step 1: Validate and setup + ValidateRequest(); + EnsureDirectoryExists(new DirectoryInfo(this._request.LocalDirectory)); - this._totalNumberOfFilesToDownload = s3Objects.Count; - Logger.DebugFormat("DownloadDirectoryCommand.ExecuteAsync: Found {0} total objects, TotalBytes={1}", - s3Objects.Count, this._totalBytes); + // Step 2: List S3 objects + var (s3Objects, prefixLength) = await ListS3ObjectsAsync(cancellationToken) + .ConfigureAwait(false); - // Step 3: Filter to actual files (exclude directory markers) - var objectsToDownload = 
FilterObjectsToDownload(s3Objects); + this._totalNumberOfFilesToDownload = s3Objects.Count; + Logger.DebugFormat("DownloadDirectoryCommand.ExecuteAsync: Found {0} total objects, TotalBytes={1}", + s3Objects.Count, this._totalBytes); - // Step 4: Setup resources and execute downloads - using (var resources = CreateDownloadResources(cancellationToken)) - { - await ExecuteParallelDownloadsAsync( - objectsToDownload, - prefixLength, - resources, - cancellationToken) - .ConfigureAwait(false); - } + // Step 3: Filter to actual files (exclude directory markers) + var objectsToDownload = FilterObjectsToDownload(s3Objects); - // Step 5: Build response - Logger.DebugFormat("DownloadDirectoryCommand.ExecuteAsync: Completed - ObjectsDownloaded={0}, ObjectsFailed={1}", - _numberOfFilesDownloaded, _errors.Count); + // Step 4: Setup resources and execute downloads + using (var resources = CreateDownloadResources(cancellationToken)) + { + await ExecuteParallelDownloadsAsync( + objectsToDownload, + prefixLength, + resources, + cancellationToken) + .ConfigureAwait(false); + } + + // Step 5: Build response + Logger.DebugFormat("DownloadDirectoryCommand.ExecuteAsync: Completed - ObjectsDownloaded={0}, ObjectsFailed={1}", + _numberOfFilesDownloaded, _errors.Count); - return BuildResponse(); + var response = BuildResponse(); + FireTransferCompletedEvent(response); + return response; + } + catch + { + FireTransferFailedEvent(); + throw; + } } /// diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryRequest.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryRequest.cs index 9931d29c1e8c..c36b8e03cb3d 100644 --- a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryRequest.cs +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryRequest.cs @@ -96,6 +96,97 @@ internal void OnRaiseObjectDownloadFailedEvent(ObjectDownloadFailedEventArgs arg ObjectDownloadFailedEvent?.Invoke(this, args); } + /// + /// Occurs when the download directory operation is initiated. + /// + /// + /// + /// The DownloadDirectoryInitiatedEvent is fired when the download directory operation begins. + /// The DownloadDirectoryInitiatedEventArgs contains the original request information. + /// + /// + /// Attach event handlers to this event if you are interested in receiving + /// DownloadDirectoryInitiatedEvent notifications. + /// + /// + /// + /// private void downloadStarted(object sender, DownloadDirectoryInitiatedEventArgs args) + /// { + /// Console.WriteLine("Download directory started for bucket {0}", args.Request.BucketName); + /// } + /// + public event EventHandler DownloadDirectoryInitiatedEvent; + + /// + /// Occurs when the download directory operation is completed. + /// + /// + /// + /// The DownloadDirectoryCompletedEvent is fired when the download directory operation is completed successfully. + /// The DownloadDirectoryCompletedEventArgs contains a snapshot of the transfer state at completion. + /// + /// + /// Attach event handlers to this event if you are interested in receiving + /// DownloadDirectoryCompletedEvent notifications. + /// + /// + /// + /// private void downloadCompleted(object sender, DownloadDirectoryCompletedEventArgs args) + /// { + /// Console.WriteLine("Download directory completed with {0} files downloaded", args.TransferredFiles); + /// } + /// + public event EventHandler DownloadDirectoryCompletedEvent; + + /// + /// Occurs when the download directory operation fails. 
+ /// + /// + /// + /// The DownloadDirectoryFailedEvent is fired when the download directory operation fails. + /// The DownloadDirectoryFailedEventArgs contains a snapshot of the transfer state at failure. + /// + /// + /// Attach event handlers to this event if you are interested in receiving + /// DownloadDirectoryFailedEvent notifications. + /// + /// + /// + /// private void downloadFailed(object sender, DownloadDirectoryFailedEventArgs args) + /// { + /// Console.WriteLine("Download directory failed with {0} files downloaded out of {1} total", + /// args.TransferredFiles, args.TotalFiles); + /// } + /// + public event EventHandler DownloadDirectoryFailedEvent; + + /// + /// Raises the DownloadDirectoryInitiatedEvent. + /// + /// DownloadDirectoryInitiatedEventArgs args + internal void OnRaiseDownloadDirectoryInitiatedEvent(DownloadDirectoryInitiatedEventArgs args) + { + DownloadDirectoryInitiatedEvent?.Invoke(this, args); + } + + /// + /// Raises the DownloadDirectoryCompletedEvent. + /// + /// DownloadDirectoryCompletedEventArgs args + internal void OnRaiseDownloadDirectoryCompletedEvent(DownloadDirectoryCompletedEventArgs args) + { + DownloadDirectoryCompletedEvent?.Invoke(this, args); + } + + /// + /// Raises the DownloadDirectoryFailedEvent. + /// + /// DownloadDirectoryFailedEventArgs args + internal void OnRaiseDownloadDirectoryFailedEvent(DownloadDirectoryFailedEventArgs args) + { + DownloadDirectoryFailedEvent?.Invoke(this, args); + } + /// /// Gets or sets the name of the bucket. /// @@ -668,4 +759,133 @@ internal ObjectDownloadFailedEventArgs( /// public Exception Exception { get; private set; } } -} \ No newline at end of file + + /// + /// Provides data for + /// which is raised when a download directory operation is initiated. + /// + public class DownloadDirectoryInitiatedEventArgs : EventArgs + { + /// + /// Initializes a new instance of the DownloadDirectoryInitiatedEventArgs class. + /// + /// The transfer request + internal DownloadDirectoryInitiatedEventArgs(TransferUtilityDownloadDirectoryRequest request) + { + Request = request; + } + + /// + /// Gets the request associated with this transfer operation. + /// + public TransferUtilityDownloadDirectoryRequest Request { get; private set; } + } + + /// + /// Provides data for + /// which is raised when a download directory operation is completed successfully. + /// + public class DownloadDirectoryCompletedEventArgs : EventArgs + { + /// + /// Initializes a new instance of the DownloadDirectoryCompletedEventArgs class. + /// + /// The transfer request + /// The transfer response + /// The total number of bytes that have been transferred so far + /// The total size for all objects + /// The total number of files that have been transferred so far + /// The total number of files + internal DownloadDirectoryCompletedEventArgs(TransferUtilityDownloadDirectoryRequest request, + TransferUtilityDownloadDirectoryResponse response, long transferredBytes, long totalBytes, + long transferredFiles, long totalFiles) + { + Request = request; + Response = response; + TransferredBytes = transferredBytes; + TotalBytes = totalBytes; + TransferredFiles = transferredFiles; + TotalFiles = totalFiles; + } + + /// + /// Gets the request associated with this transfer operation. + /// + public TransferUtilityDownloadDirectoryRequest Request { get; private set; } + + /// + /// Gets the response from the transfer operation. 
+ /// + public TransferUtilityDownloadDirectoryResponse Response { get; private set; } + + /// + /// Gets the total number of bytes that have been transferred so far. + /// + public long TransferredBytes { get; private set; } + + /// + /// Gets the total size for all objects. Returns -1 if unknown. + /// + public long TotalBytes { get; private set; } + + /// + /// Gets the total number of files that have been transferred so far. + /// + public long TransferredFiles { get; private set; } + + /// + /// Gets the total number of files. Returns -1 if unknown. + /// + public long TotalFiles { get; private set; } + } + + /// + /// Provides data for + /// which is raised when a download directory operation fails. + /// + public class DownloadDirectoryFailedEventArgs : EventArgs + { + /// + /// Initializes a new instance of the DownloadDirectoryFailedEventArgs class. + /// + /// The transfer request + /// The total number of bytes that have been transferred so far + /// The total size for all objects + /// The total number of files that have been transferred so far + /// The total number of files + internal DownloadDirectoryFailedEventArgs(TransferUtilityDownloadDirectoryRequest request, + long transferredBytes, long totalBytes, long transferredFiles, long totalFiles) + { + Request = request; + TransferredBytes = transferredBytes; + TotalBytes = totalBytes; + TransferredFiles = transferredFiles; + TotalFiles = totalFiles; + } + + /// + /// Gets the request associated with this transfer operation. + /// + public TransferUtilityDownloadDirectoryRequest Request { get; private set; } + + /// + /// Gets the total number of bytes that have been transferred so far. + /// + public long TransferredBytes { get; private set; } + + /// + /// Gets the total size for all objects. Returns -1 if unknown. + /// + public long TotalBytes { get; private set; } + + /// + /// Gets the total number of files that have been transferred so far. + /// + public long TransferredFiles { get; private set; } + + /// + /// Gets the total number of files. Returns -1 if unknown. + /// + public long TotalFiles { get; private set; } + } +} diff --git a/sdk/test/Services/S3/IntegrationTests/TransferUtilityDownloadDirectoryLifecycleTests.cs b/sdk/test/Services/S3/IntegrationTests/TransferUtilityDownloadDirectoryLifecycleTests.cs new file mode 100644 index 000000000000..a3f4fa660324 --- /dev/null +++ b/sdk/test/Services/S3/IntegrationTests/TransferUtilityDownloadDirectoryLifecycleTests.cs @@ -0,0 +1,320 @@ +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Threading; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Amazon.S3; +using Amazon.S3.Model; +using Amazon.S3.Transfer; +using Amazon.S3.Util; +using AWSSDK_DotNet.IntegrationTests.Utils; + +namespace AWSSDK_DotNet.IntegrationTests.Tests.S3 +{ + /// + /// Integration tests for TransferUtility download directory lifecycle events. + /// Tests the initiated, completed, and failed events for directory downloads. 
+ /// + [TestClass] + public class TransferUtilityDownloadDirectoryLifecycleTests : TestBase + { + public static readonly long MEG_SIZE = (int)Math.Pow(2, 20); + public static readonly long KILO_SIZE = (int)Math.Pow(2, 10); + public static readonly string BasePath = Path.Combine(Path.GetTempPath(), "transferutility", "downloaddirectorylifecycle"); + + private static string bucketName; + private static string plainTextContentType = "text/plain"; + + [ClassInitialize()] + public static void ClassInitialize(TestContext a) + { + bucketName = S3TestUtils.CreateBucketWithWait(Client); + } + + [ClassCleanup] + public static void ClassCleanup() + { + AmazonS3Util.DeleteS3BucketWithObjects(Client, bucketName); + BaseClean(); + if (Directory.Exists(BasePath)) + { + Directory.Delete(BasePath, true); + } + } + + [TestMethod] + [TestCategory("S3")] + public void DownloadDirectoryInitiatedEventTest() + { + var eventValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.IsNotNull(args.Request.BucketName); + Assert.IsNotNull(args.Request.S3Directory); + Assert.IsNotNull(args.Request.LocalDirectory); + } + }; + DownloadDirectoryWithLifecycleEvents(10 * MEG_SIZE, eventValidator, null, null); + eventValidator.AssertEventFired(); + } + + [TestMethod] + [TestCategory("S3")] + public void DownloadDirectoryCompletedEventTest() + { + var eventValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.IsNotNull(args.Response); + + // Verify progress information is available in completed event + Assert.IsTrue(args.TotalFiles > 0, "TotalFiles should be greater than 0"); + Assert.AreEqual(args.TransferredFiles, args.TotalFiles, "All files should be transferred"); + Assert.IsTrue(args.TotalBytes > 0, "TotalBytes should be greater than 0"); + Assert.AreEqual(args.TransferredBytes, args.TotalBytes, "All bytes should be transferred"); + } + }; + DownloadDirectoryWithLifecycleEvents(12 * MEG_SIZE, null, eventValidator, null); + eventValidator.AssertEventFired(); + } + + [TestMethod] + [TestCategory("S3")] + public void DownloadDirectoryFailedEventTest() + { + var eventValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + } + }; + + // Use an invalid bucket name to force a real exception + // Bucket names with uppercase letters are invalid and will cause an exception + var invalidBucketName = "INVALID-BUCKET-NAME-" + Guid.NewGuid().ToString(); + var testDirectory = "test-directory"; + + var localDirectory = GenerateDirectoryPath(); + + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityDownloadDirectoryRequest + { + BucketName = invalidBucketName, // This will cause an exception due to invalid bucket name + LocalDirectory = localDirectory, + S3Directory = testDirectory + }; + + request.DownloadDirectoryFailedEvent += eventValidator.OnEventFired; + + try + { + transferUtility.DownloadDirectory(request); + Assert.Fail("Expected an exception to be thrown for invalid bucket name"); + } + catch (Exception ex) + { + // Expected exception - the failed event should have been fired + Console.WriteLine($"Expected exception caught: {ex.GetType().Name} - {ex.Message}"); + } + + eventValidator.AssertEventFired(); + } + + [TestMethod] + [TestCategory("S3")] + public void DownloadDirectoryCompleteLifecycleTest() + { + var initiatedValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + 
Assert.IsNotNull(args.Request); + Assert.AreEqual(bucketName, args.Request.BucketName); + Assert.IsNotNull(args.Request.S3Directory); + Assert.IsNotNull(args.Request.LocalDirectory); + } + }; + + var completedValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.IsNotNull(args.Response); + Assert.AreEqual(args.TransferredFiles, args.TotalFiles); + Assert.AreEqual(args.TransferredBytes, args.TotalBytes); + Assert.IsTrue(args.TotalFiles > 0, "Should have downloaded at least one file"); + } + }; + + DownloadDirectoryWithLifecycleEvents(15 * MEG_SIZE, initiatedValidator, completedValidator, null); + + initiatedValidator.AssertEventFired(); + completedValidator.AssertEventFired(); + } + + #region Helper Methods + + void DownloadDirectoryWithLifecycleEvents(long fileSize, + TransferLifecycleEventValidator initiatedValidator, + TransferLifecycleEventValidator completedValidator, + TransferLifecycleEventValidator failedValidator) + { + // First create and upload a test directory + var testDirectory = CreateAndUploadTestDirectory(fileSize); + var s3Directory = testDirectory.Name; + + DownloadDirectoryWithLifecycleEventsAndS3Directory(s3Directory, initiatedValidator, completedValidator, failedValidator); + } + + void DownloadDirectoryWithLifecycleEventsAndS3Directory(string s3Directory, + TransferLifecycleEventValidator initiatedValidator, + TransferLifecycleEventValidator completedValidator, + TransferLifecycleEventValidator failedValidator) + { + var localDirectory = GenerateDirectoryPath(); + + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityDownloadDirectoryRequest + { + BucketName = bucketName, + LocalDirectory = localDirectory, + S3Directory = s3Directory + }; + + if (initiatedValidator != null) + { + request.DownloadDirectoryInitiatedEvent += initiatedValidator.OnEventFired; + } + + if (completedValidator != null) + { + request.DownloadDirectoryCompletedEvent += completedValidator.OnEventFired; + } + + if (failedValidator != null) + { + request.DownloadDirectoryFailedEvent += failedValidator.OnEventFired; + } + + transferUtility.DownloadDirectory(request); + + // Validate downloaded directory contents if it was successful + if (Directory.Exists(localDirectory)) + { + var downloadedFiles = Directory.GetFiles(localDirectory, "*", SearchOption.AllDirectories); + Console.WriteLine($"Downloaded {downloadedFiles.Length} files to {localDirectory}"); + } + } + + DirectoryInfo CreateAndUploadTestDirectory(long fileSize, int numberOfTestFiles = 3) + { + var directory = CreateTestDirectory(fileSize, numberOfTestFiles); + var keyPrefix = directory.Name; + var directoryPath = directory.FullName; + + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityUploadDirectoryRequest + { + BucketName = bucketName, + Directory = directoryPath, + KeyPrefix = keyPrefix, + ContentType = plainTextContentType, + SearchPattern = "*", + SearchOption = SearchOption.AllDirectories, + }; + + transferUtility.UploadDirectory(request); + + // Validate the upload was successful + ValidateDirectoryContentsInS3(Client, bucketName, keyPrefix, directory); + + return directory; + } + + public static DirectoryInfo CreateTestDirectory(long fileSize = 0, int numberOfTestFiles = 3) + { + if (fileSize == 0) + fileSize = 1 * MEG_SIZE; + + var directoryPath = GenerateDirectoryPath(); + for (int i = 0; i < numberOfTestFiles; i++) + { + var filePath = Path.Combine(Path.Combine(directoryPath, i.ToString()), 
"file.txt"); + UtilityMethods.GenerateFile(filePath, fileSize); + } + + return new DirectoryInfo(directoryPath); + } + + public static string GenerateDirectoryPath(string baseName = "DownloadDirectoryLifecycleTest") + { + var directoryName = UtilityMethods.GenerateName(baseName); + var directoryPath = Path.Combine(BasePath, directoryName); + return directoryPath; + } + + public static void ValidateDirectoryContentsInS3(IAmazonS3 s3client, string bucketName, string keyPrefix, DirectoryInfo sourceDirectory) + { + var directoryPath = sourceDirectory.FullName; + var files = sourceDirectory.GetFiles("*", SearchOption.AllDirectories); + foreach (var file in files) + { + var filePath = file.FullName; + var relativePath = filePath.Substring(directoryPath.Length + 1); + var key = (!string.IsNullOrEmpty(keyPrefix) ? keyPrefix + "/" : string.Empty) + relativePath.Replace("\\", "/"); + + // Verify the object exists in S3 + var metadata = s3client.GetObjectMetadata(new GetObjectMetadataRequest + { + BucketName = bucketName, + Key = key + }); + Assert.IsNotNull(metadata, $"Object {key} should exist in S3"); + } + } + + #endregion + + #region Shared Helper Classes + + class TransferLifecycleEventValidator + { + public Action Validate { get; set; } + public bool EventFired { get; private set; } + public Exception EventException { get; private set; } + + public void OnEventFired(object sender, T eventArgs) + { + try + { + Console.WriteLine("Lifecycle Event Fired: {0}", typeof(T).Name); + Validate?.Invoke(eventArgs); + EventFired = true; // Only set if validation passes + } + catch (Exception ex) + { + EventException = ex; + EventFired = false; // Ensure we don't mark as fired on failure + Console.WriteLine("Exception caught in lifecycle event: {0}", ex.Message); + // Don't re-throw, let AssertEventFired() handle it + } + } + + public void AssertEventFired() + { + if (EventException != null) + throw EventException; + Assert.IsTrue(EventFired, $"{typeof(T).Name} event was not fired"); + } + } + + #endregion + } +} From a93e67b09760abd24182a6d9f2448ac2f3e4c55a Mon Sep 17 00:00:00 2001 From: Garrett Beatty Date: Wed, 3 Dec 2025 13:20:07 -0500 Subject: [PATCH 41/56] update logging (#4188) --- .../c49077d9-90b3-437f-b316-6d8d8833ae65.json | 11 +++ .../Internal/BufferedMultipartStream.cs | 17 ++--- .../Internal/BufferedPartDataHandler.cs | 29 ++++---- .../Transfer/Internal/FilePartDataHandler.cs | 27 +++---- .../Internal/MultipartDownloadCommand.cs | 10 +-- .../Internal/MultipartDownloadManager.cs | 73 +++++++++---------- .../Internal/MultipartUploadCommand.cs | 14 +--- .../Custom/Transfer/Internal/TaskHelpers.cs | 29 +++----- .../_async/MultipartDownloadCommand.async.cs | 14 ++-- .../_async/MultipartUploadCommand.async.cs | 26 +++---- .../OpenStreamWithResponseCommand.async.cs | 11 +-- .../DownloadDirectoryCommand.cs | 31 ++++---- .../S3/Custom/Transfer/TransferUtility.cs | 9 +-- 13 files changed, 135 insertions(+), 166 deletions(-) create mode 100644 generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae65.json diff --git a/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae65.json b/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae65.json new file mode 100644 index 000000000000..1e2e348d0f87 --- /dev/null +++ b/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae65.json @@ -0,0 +1,11 @@ +{ + "services": [ + { + "serviceName": "S3", + "type": "patch", + "changeLogMessages": [ + "Fix Transfer Utility internal Logger recursive property definition" + ] + } + ] +} diff --git 
a/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedMultipartStream.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedMultipartStream.cs index 991793112657..7093c2aa6a7b 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedMultipartStream.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedMultipartStream.cs @@ -46,10 +46,7 @@ internal class BufferedMultipartStream : Stream private DownloadDiscoveryResult _discoveryResult; private long _totalBytesRead = 0; - private Logger Logger - { - get { return Logger.GetLogger(typeof(TransferUtility)); } - } + private readonly Logger _logger = Logger.GetLogger(typeof(BufferedMultipartStream)); /// /// Gets the containing metadata from the initial GetObject response. @@ -113,12 +110,12 @@ public async Task InitializeAsync(CancellationToken cancellationToken) if (_initialized) throw new InvalidOperationException("Stream has already been initialized"); - Logger.DebugFormat("BufferedMultipartStream: Starting initialization"); + _logger.DebugFormat("BufferedMultipartStream: Starting initialization"); _discoveryResult = await _downloadCoordinator.DiscoverDownloadStrategyAsync(cancellationToken) .ConfigureAwait(false); - Logger.DebugFormat("BufferedMultipartStream: Discovery completed - ObjectSize={0}, TotalParts={1}, IsSinglePart={2}", + _logger.DebugFormat("BufferedMultipartStream: Discovery completed - ObjectSize={0}, TotalParts={1}, IsSinglePart={2}", _discoveryResult.ObjectSize, _discoveryResult.TotalParts, _discoveryResult.IsSinglePart); @@ -127,7 +124,7 @@ await _downloadCoordinator.StartDownloadsAsync(_discoveryResult, null, cancellat .ConfigureAwait(false); _initialized = true; - Logger.DebugFormat("BufferedMultipartStream: Initialization completed successfully"); + _logger.DebugFormat("BufferedMultipartStream: Initialization completed successfully"); } /// @@ -168,7 +165,7 @@ public override async Task ReadAsync(byte[] buffer, int offset, int count, throw new ArgumentException("Offset and count exceed buffer bounds"); var currentPosition = Interlocked.Read(ref _totalBytesRead); - Logger.DebugFormat("BufferedMultipartStream: ReadAsync called - Position={0}, RequestedBytes={1}", + _logger.DebugFormat("BufferedMultipartStream: ReadAsync called - Position={0}, RequestedBytes={1}", currentPosition, count); var bytesRead = await _partBufferManager.ReadAsync(buffer, offset, count, cancellationToken) @@ -178,12 +175,12 @@ public override async Task ReadAsync(byte[] buffer, int offset, int count, if (bytesRead > 0) { Interlocked.Add(ref _totalBytesRead, bytesRead); - Logger.DebugFormat("BufferedMultipartStream: ReadAsync completed - BytesRead={0}, NewPosition={1}", + _logger.DebugFormat("BufferedMultipartStream: ReadAsync completed - BytesRead={0}, NewPosition={1}", bytesRead, currentPosition + bytesRead); } else { - Logger.DebugFormat("BufferedMultipartStream: ReadAsync returned EOF (0 bytes)"); + _logger.DebugFormat("BufferedMultipartStream: ReadAsync returned EOF (0 bytes)"); } return bytesRead; diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedPartDataHandler.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedPartDataHandler.cs index 33e8a7f9816d..a52e5a159758 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedPartDataHandler.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedPartDataHandler.cs @@ -55,10 +55,7 @@ internal class BufferedPartDataHandler : IPartDataHandler private readonly IPartBufferManager _partBufferManager; private readonly BufferedDownloadConfiguration 
_config; - private Logger Logger - { - get { return Logger.GetLogger(typeof(TransferUtility)); } - } + private readonly Logger _logger = Logger.GetLogger(typeof(BufferedPartDataHandler)); /// /// Initializes a new instance of the class. @@ -137,7 +134,7 @@ private async Task ProcessStreamingPartAsync( GetObjectResponse response, CancellationToken cancellationToken) { - Logger.DebugFormat("BufferedPartDataHandler: [Part {0}] Matches NextExpectedPartNumber - streaming directly without buffering", + _logger.DebugFormat("BufferedPartDataHandler: [Part {0}] Matches NextExpectedPartNumber - streaming directly without buffering", partNumber); StreamingDataSource streamingDataSource = null; @@ -161,12 +158,12 @@ private async Task ProcessStreamingPartAsync( // Release capacity immediately since we're not holding anything in memory _partBufferManager.ReleaseBufferSpace(); - Logger.DebugFormat("BufferedPartDataHandler: [Part {0}] StreamingDataSource added and capacity released", + _logger.DebugFormat("BufferedPartDataHandler: [Part {0}] StreamingDataSource added and capacity released", partNumber); } catch (Exception ex) { - Logger.Error(ex, "BufferedPartDataHandler: [Part {0}] Failed to process streaming part", partNumber); + _logger.Error(ex, "BufferedPartDataHandler: [Part {0}] Failed to process streaming part", partNumber); // Dispose response if we still own it (constructor failed before taking ownership) if (ownsResponse) @@ -206,7 +203,7 @@ private async Task ProcessBufferedPartAsync( GetObjectResponse response, CancellationToken cancellationToken) { - Logger.DebugFormat("BufferedPartDataHandler: [Part {0}] Out of order (NextExpected={1}) - buffering to memory", + _logger.DebugFormat("BufferedPartDataHandler: [Part {0}] Out of order (NextExpected={1}) - buffering to memory", partNumber, _partBufferManager.NextExpectedPartNumber); try @@ -220,18 +217,18 @@ private async Task ProcessBufferedPartAsync( // Response has been fully read and buffered - dispose it now response?.Dispose(); - Logger.DebugFormat("BufferedPartDataHandler: [Part {0}] Buffered {1} bytes into memory", + _logger.DebugFormat("BufferedPartDataHandler: [Part {0}] Buffered {1} bytes into memory", partNumber, buffer.Length); // Add the buffered part to the buffer manager _partBufferManager.AddBuffer(buffer); - Logger.DebugFormat("BufferedPartDataHandler: [Part {0}] Added to buffer manager (capacity will be released after consumption)", + _logger.DebugFormat("BufferedPartDataHandler: [Part {0}] Added to buffer manager (capacity will be released after consumption)", partNumber); } catch (Exception ex) { - Logger.Error(ex, "BufferedPartDataHandler: [Part {0}] Failed to process buffered part", partNumber); + _logger.Error(ex, "BufferedPartDataHandler: [Part {0}] Failed to process buffered part", partNumber); // We own the response throughout this method, so dispose it on error response?.Dispose(); @@ -286,7 +283,7 @@ private async Task BufferPartFromResponseAsync( long expectedBytes = response.ContentLength; int initialBufferSize = (int)expectedBytes; - Logger.DebugFormat("BufferedPartDataHandler: [Part {0}] Allocating buffer of size {1} bytes from ArrayPool", + _logger.DebugFormat("BufferedPartDataHandler: [Part {0}] Allocating buffer of size {1} bytes from ArrayPool", partNumber, initialBufferSize); downloadedPart = StreamPartBuffer.Create(partNumber, initialBufferSize); @@ -299,7 +296,7 @@ private async Task BufferPartFromResponseAsync( // The MemoryStream starts at position 0 and can grow up to initialBufferSize using (var 
memoryStream = new MemoryStream(partBuffer, 0, initialBufferSize, writable: true)) { - Logger.DebugFormat("BufferedPartDataHandler: [Part {0}] Reading response stream into buffer", + _logger.DebugFormat("BufferedPartDataHandler: [Part {0}] Reading response stream into buffer", partNumber); // Use GetObjectResponse's stream copy logic which includes: @@ -316,7 +313,7 @@ await response.WriteResponseStreamAsync( int totalRead = (int)memoryStream.Position; - Logger.DebugFormat("BufferedPartDataHandler: [Part {0}] Read {1} bytes from response stream", + _logger.DebugFormat("BufferedPartDataHandler: [Part {0}] Read {1} bytes from response stream", partNumber, totalRead); // Set the length to reflect actual bytes read @@ -324,7 +321,7 @@ await response.WriteResponseStreamAsync( if (totalRead != expectedBytes) { - Logger.Error(null, "BufferedPartDataHandler: [Part {0}] Size mismatch - Expected {1} bytes, read {2} bytes", + _logger.Error(null, "BufferedPartDataHandler: [Part {0}] Size mismatch - Expected {1} bytes, read {2} bytes", partNumber, expectedBytes, totalRead); } } @@ -333,7 +330,7 @@ await response.WriteResponseStreamAsync( } catch (Exception ex) { - Logger.Error(ex, "BufferedPartDataHandler: [Part {0}] Failed to buffer part from response stream", partNumber); + _logger.Error(ex, "BufferedPartDataHandler: [Part {0}] Failed to buffer part from response stream", partNumber); // If something goes wrong, StreamPartBuffer.Dispose() will handle cleanup downloadedPart?.Dispose(); throw; diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/FilePartDataHandler.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/FilePartDataHandler.cs index 1d1e4452b311..da9210465fde 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/FilePartDataHandler.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/FilePartDataHandler.cs @@ -42,10 +42,7 @@ internal class FilePartDataHandler : IPartDataHandler private string _tempFilePath; private bool _disposed = false; - private Logger Logger - { - get { return Logger.GetLogger(typeof(TransferUtility)); } - } + private readonly Logger _logger = Logger.GetLogger(typeof(FilePartDataHandler)); /// /// Initializes a new instance for file downloads. 
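[Editorial sketch] The same refactor recurs in each handler above: a `Logger`-typed property named `Logger` (the name-shadowing shape the changelog calls a recursive property definition) re-resolved the logger on every access and attributed all output to `TransferUtility`; it is replaced by a `readonly` field resolved once and keyed to the declaring type. A minimal before/after sketch of that pattern, using a hypothetical `SomeHandler` class and the `Logger.GetLogger(Type)` call already shown in these hunks:

    using Amazon.Runtime.Internal.Util;

    internal class SomeHandler
    {
        // Before: the member name shadows the Logger type, the getter
        // re-resolves the logger on every log call, and every message is
        // attributed to TransferUtility rather than the class doing the work.
        //
        // private Logger Logger
        // {
        //     get { return Logger.GetLogger(typeof(TransferUtility)); }
        // }

        // After: resolve once, cache in a readonly field, and key the logger
        // to the declaring type so log output names the real source.
        private readonly Logger _logger = Logger.GetLogger(typeof(SomeHandler));

        public void DoWork()
        {
            _logger.DebugFormat("SomeHandler: DoWork starting");
        }
    }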
@@ -63,7 +60,7 @@ public Task PrepareAsync(DownloadDiscoveryResult discoveryResult, CancellationTo // Create temporary file once during preparation phase _tempFilePath = _fileHandler.CreateTemporaryFile(_config.DestinationFilePath); - Logger.DebugFormat("FilePartDataHandler: Created temporary file for download"); + _logger.DebugFormat("FilePartDataHandler: Created temporary file for download"); return Task.CompletedTask; } @@ -83,20 +80,20 @@ public async Task ProcessPartAsync( { try { - Logger.DebugFormat("FilePartDataHandler: [Part {0}] Starting to process part - ContentLength={1}", + _logger.DebugFormat("FilePartDataHandler: [Part {0}] Starting to process part - ContentLength={1}", partNumber, response.ContentLength); // Calculate offset for this part based on ContentRange or part number long offset = GetPartOffset(response, partNumber); - Logger.DebugFormat("FilePartDataHandler: [Part {0}] Calculated file offset={1}", + _logger.DebugFormat("FilePartDataHandler: [Part {0}] Calculated file offset={1}", partNumber, offset); // Write part data to file at the calculated offset await WritePartToFileAsync(offset, response, cancellationToken) .ConfigureAwait(false); - Logger.DebugFormat("FilePartDataHandler: [Part {0}] File write completed successfully", + _logger.DebugFormat("FilePartDataHandler: [Part {0}] File write completed successfully", partNumber); } finally @@ -128,17 +125,17 @@ public void OnDownloadComplete(Exception exception) if (exception == null) { // Success - commit temp file to final destination - Logger.DebugFormat("FilePartDataHandler: Download complete, committing temporary file to destination"); + _logger.DebugFormat("FilePartDataHandler: Download complete, committing temporary file to destination"); try { _fileHandler.CommitFile(_tempFilePath, _config.DestinationFilePath); - Logger.DebugFormat("FilePartDataHandler: Successfully committed file to destination"); + _logger.DebugFormat("FilePartDataHandler: Successfully committed file to destination"); } catch (Exception commitException) { - Logger.Error(commitException, "FilePartDataHandler: Failed to commit file to destination"); + _logger.Error(commitException, "FilePartDataHandler: Failed to commit file to destination"); // Cleanup on commit failure _fileHandler.CleanupOnFailure(); @@ -149,7 +146,7 @@ public void OnDownloadComplete(Exception exception) else { // Failure - cleanup temp file - Logger.Error(exception, "FilePartDataHandler: Download failed, cleaning up temporary file"); + _logger.Error(exception, "FilePartDataHandler: Download failed, cleaning up temporary file"); _fileHandler.CleanupOnFailure(); } @@ -202,7 +199,7 @@ private async Task WritePartToFileAsync( if (string.IsNullOrEmpty(_tempFilePath)) throw new InvalidOperationException("Temporary file has not been created"); - Logger.DebugFormat("FilePartDataHandler: Opening file for writing at offset {0} with BufferSize={1}", + _logger.DebugFormat("FilePartDataHandler: Opening file for writing at offset {0} with BufferSize={1}", offset, _config.BufferSize); // Open file with FileShare.Write to allow concurrent writes from other threads @@ -216,7 +213,7 @@ private async Task WritePartToFileAsync( // Seek to the correct offset for this part fileStream.Seek(offset, SeekOrigin.Begin); - Logger.DebugFormat("FilePartDataHandler: Writing {0} bytes to file at offset {1}", + _logger.DebugFormat("FilePartDataHandler: Writing {0} bytes to file at offset {1}", response.ContentLength, offset); // Use GetObjectResponse's stream copy logic which includes: @@ -235,7 +232,7 
@@ await response.WriteResponseStreamAsync( await fileStream.FlushAsync(cancellationToken) .ConfigureAwait(false); - Logger.DebugFormat("FilePartDataHandler: Successfully wrote {0} bytes at offset {1}", + _logger.DebugFormat("FilePartDataHandler: Successfully wrote {0} bytes at offset {1}", response.ContentLength, offset); } } diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadCommand.cs index 809c00c2d3dc..e282decfbc18 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadCommand.cs @@ -42,13 +42,7 @@ internal partial class MultipartDownloadCommand : BaseCommand /// Initializes a new instance of the MultipartDownloadCommand class for single file downloads. @@ -118,7 +112,7 @@ private FileDownloadConfiguration CreateConfiguration() // Use S3 client buffer size for I/O operations int bufferSize = _s3Client.Config.BufferSize; - Logger.DebugFormat("MultipartDownloadCommand: Creating configuration - PartSizeFromRequest={0}, UsingDefaultPartSize={1}", + _logger.DebugFormat("MultipartDownloadCommand: Creating configuration - PartSizeFromRequest={0}, UsingDefaultPartSize={1}", _request.IsSetPartSize() ? _request.PartSize.ToString() : "Not Set", !_request.IsSetPartSize()); diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs index 7ccab05cafe0..0208ae789823 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs @@ -66,10 +66,7 @@ internal class MultipartDownloadManager : IDownloadManager // Uses int instead of bool because Interlocked.CompareExchange requires reference types private int _completionEventFired = 0; // 0 = false, 1 = true - private Logger Logger - { - get { return Logger.GetLogger(typeof(TransferUtility)); } - } + private readonly Logger _logger = Logger.GetLogger(typeof(MultipartDownloadManager)); /// /// Task that completes when all downloads finish (successfully or with error). 
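[Editorial sketch] The HttpConcurrency tests earlier in this series pin down the invariant the following hunks log around: the throttler's `CurrentCount` must return to its initial value on success, failure, and cancellation alike. The shape that guarantees this is acquire-then-try/finally; a minimal sketch under those assumptions, where `httpSlots` and `sendRequestAsync` are hypothetical stand-ins for the manager's HTTP throttler and its GetObject call:

    private static async Task DownloadWithSlotAsync(
        SemaphoreSlim httpSlots,
        Func<Task> sendRequestAsync,
        CancellationToken cancellationToken)
    {
        // Acquire outside the try block: if WaitAsync throws (e.g. on
        // cancellation), no slot was taken, so there is nothing to release.
        await httpSlots.WaitAsync(cancellationToken).ConfigureAwait(false);
        try
        {
            await sendRequestAsync().ConfigureAwait(false);
        }
        finally
        {
            // Runs on success, exception, and cancellation alike, so the
            // semaphore's CurrentCount returns to its initial value.
            httpSlots.Release();
        }
    }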
@@ -184,7 +181,7 @@ public async Task DiscoverDownloadStrategyAsync(Cancell if (_discoveryCompleted) throw new InvalidOperationException("Discovery has already been performed"); - Logger.DebugFormat("MultipartDownloadManager: Starting discovery with strategy={0}", + _logger.DebugFormat("MultipartDownloadManager: Starting discovery with strategy={0}", _request.MultipartDownloadType); try @@ -196,7 +193,7 @@ public async Task DiscoverDownloadStrategyAsync(Cancell _discoveryCompleted = true; - Logger.InfoFormat("MultipartDownloadManager: Discovery complete - ObjectSize={0}, TotalParts={1}, Strategy={2}, ETagPresent={3}", + _logger.InfoFormat("MultipartDownloadManager: Discovery complete - ObjectSize={0}, TotalParts={1}, Strategy={2}, ETagPresent={3}", result.ObjectSize, result.TotalParts, _request.MultipartDownloadType, @@ -207,7 +204,7 @@ public async Task DiscoverDownloadStrategyAsync(Cancell catch (Exception ex) { _downloadException = ex; - Logger.Error(ex, "MultipartDownloadManager: Discovery failed"); + _logger.Error(ex, "MultipartDownloadManager: Discovery failed"); throw; } } @@ -224,7 +221,7 @@ public async Task StartDownloadsAsync(DownloadDiscoveryResult discoveryResult, E _userProgressCallback = progressCallback; _totalObjectSize = discoveryResult.ObjectSize; - Logger.DebugFormat("MultipartDownloadManager: Starting downloads - TotalParts={0}, IsSinglePart={1}", + _logger.DebugFormat("MultipartDownloadManager: Starting downloads - TotalParts={0}, IsSinglePart={1}", discoveryResult.TotalParts, discoveryResult.IsSinglePart); var downloadTasks = new List(); @@ -249,7 +246,7 @@ public async Task StartDownloadsAsync(DownloadDiscoveryResult discoveryResult, E } // Process Part 1 from InitialResponse (applies to both single-part and multipart) - Logger.DebugFormat("MultipartDownloadManager: Buffering Part 1 from discovery response"); + _logger.DebugFormat("MultipartDownloadManager: Buffering Part 1 from discovery response"); await _dataHandler.ProcessPartAsync(1, discoveryResult.InitialResponse, cancellationToken).ConfigureAwait(false); } finally @@ -265,7 +262,7 @@ public async Task StartDownloadsAsync(DownloadDiscoveryResult discoveryResult, E if (discoveryResult.IsSinglePart) { // Single-part: Part 1 is the entire object - Logger.DebugFormat("MultipartDownloadManager: Single-part download complete"); + _logger.DebugFormat("MultipartDownloadManager: Single-part download complete"); _dataHandler.OnDownloadComplete(null); return; } @@ -280,10 +277,10 @@ public async Task StartDownloadsAsync(DownloadDiscoveryResult discoveryResult, E { try { - Logger.DebugFormat("MultipartDownloadManager: Background task starting capacity acquisition and downloads"); + _logger.DebugFormat("MultipartDownloadManager: Background task starting capacity acquisition and downloads"); // Multipart: Start concurrent downloads for remaining parts (Part 2 onwards) - Logger.InfoFormat("MultipartDownloadManager: Starting concurrent downloads for parts 2-{0}", + _logger.InfoFormat("MultipartDownloadManager: Starting concurrent downloads for parts 2-{0}", discoveryResult.TotalParts); // Pre-acquire capacity in sequential order to prevent race condition deadlock @@ -291,24 +288,24 @@ public async Task StartDownloadsAsync(DownloadDiscoveryResult discoveryResult, E // parts from consuming all buffer slots and blocking the next expected part for (int partNum = 2; partNum <= discoveryResult.TotalParts; partNum++) { - Logger.DebugFormat("MultipartDownloadManager: [Part {0}] Waiting for buffer space", partNum); + 
_logger.DebugFormat("MultipartDownloadManager: [Part {0}] Waiting for buffer space", partNum); // Acquire capacity sequentially - guarantees Part 2 before Part 3, etc. await _dataHandler.WaitForCapacityAsync(cancellationToken).ConfigureAwait(false); - Logger.DebugFormat("MultipartDownloadManager: [Part {0}] Buffer space acquired", partNum); + _logger.DebugFormat("MultipartDownloadManager: [Part {0}] Buffer space acquired", partNum); var task = CreateDownloadTaskAsync(partNum, discoveryResult.ObjectSize, wrappedCallback, internalCts.Token); downloadTasks.Add(task); } var expectedTaskCount = downloadTasks.Count; - Logger.DebugFormat("MultipartDownloadManager: Background task waiting for {0} download tasks", expectedTaskCount); + _logger.DebugFormat("MultipartDownloadManager: Background task waiting for {0} download tasks", expectedTaskCount); // Wait for all downloads to complete (fails fast on first exception) await TaskHelpers.WhenAllOrFirstExceptionAsync(downloadTasks, cancellationToken).ConfigureAwait(false); - Logger.DebugFormat("MultipartDownloadManager: All download tasks completed successfully"); + _logger.DebugFormat("MultipartDownloadManager: All download tasks completed successfully"); // SEP Part GET Step 6 / Ranged GET Step 8: // "validate that the total number of part GET requests sent matches with the expected PartsCount" @@ -324,7 +321,7 @@ public async Task StartDownloadsAsync(DownloadDiscoveryResult discoveryResult, E } // Mark successful completion - Logger.InfoFormat("MultipartDownloadManager: Download completed successfully - TotalParts={0}", + _logger.InfoFormat("MultipartDownloadManager: Download completed successfully - TotalParts={0}", discoveryResult.TotalParts); _dataHandler.OnDownloadComplete(null); } @@ -333,7 +330,7 @@ public async Task StartDownloadsAsync(DownloadDiscoveryResult discoveryResult, E catch (Exception ex) { _downloadException = ex; - Logger.Error(ex, "MultipartDownloadManager: Background download task failed"); + _logger.Error(ex, "MultipartDownloadManager: Background download task failed"); _dataHandler.OnDownloadComplete(ex); throw; } @@ -348,12 +345,12 @@ public async Task StartDownloadsAsync(DownloadDiscoveryResult discoveryResult, E // Return immediately to allow consumer to start reading // This prevents deadlock when buffer fills up before consumer begins reading - Logger.DebugFormat("MultipartDownloadManager: Returning to allow consumer to start reading"); + _logger.DebugFormat("MultipartDownloadManager: Returning to allow consumer to start reading"); } catch (Exception ex) { _downloadException = ex; - Logger.Error(ex, "MultipartDownloadManager: Download failed"); + _logger.Error(ex, "MultipartDownloadManager: Download failed"); _dataHandler.OnDownloadComplete(ex); @@ -374,13 +371,13 @@ private async Task CreateDownloadTaskAsync(int partNumber, long objectSize, Even try { - Logger.DebugFormat("MultipartDownloadManager: [Part {0}] Waiting for HTTP concurrency slot (Available: {1}/{2})", + _logger.DebugFormat("MultipartDownloadManager: [Part {0}] Waiting for HTTP concurrency slot (Available: {1}/{2})", partNumber, _httpConcurrencySlots.CurrentCount, _config.ConcurrentServiceRequests); // Limit HTTP concurrency await _httpConcurrencySlots.WaitAsync(cancellationToken).ConfigureAwait(false); - Logger.DebugFormat("MultipartDownloadManager: [Part {0}] HTTP concurrency slot acquired", partNumber); + _logger.DebugFormat("MultipartDownloadManager: [Part {0}] HTTP concurrency slot acquired", partNumber); try { @@ -397,7 +394,7 @@ private async Task 
CreateDownloadTaskAsync(int partNumber, long objectSize, Even // for each request to the Etag value saved from Step 3" getObjectRequest.EtagToMatch = _savedETag; - Logger.DebugFormat("MultipartDownloadManager: [Part {0}] Sending GetObject request with PartNumber={1}, IfMatchPresent={2}", + _logger.DebugFormat("MultipartDownloadManager: [Part {0}] Sending GetObject request with PartNumber={1}, IfMatchPresent={2}", partNumber, partNumber, !string.IsNullOrEmpty(_savedETag)); } else @@ -412,7 +409,7 @@ private async Task CreateDownloadTaskAsync(int partNumber, long objectSize, Even // for each request to the value saved from Step 5" getObjectRequest.EtagToMatch = _savedETag; - Logger.DebugFormat("MultipartDownloadManager: [Part {0}] Sending GetObject request with ByteRange={1}-{2}, IfMatchPresent={3}", + _logger.DebugFormat("MultipartDownloadManager: [Part {0}] Sending GetObject request with ByteRange={1}-{2}, IfMatchPresent={3}", partNumber, startByte, endByte, !string.IsNullOrEmpty(_savedETag)); } @@ -425,31 +422,31 @@ private async Task CreateDownloadTaskAsync(int partNumber, long objectSize, Even response.WriteObjectProgressEvent += progressCallback; } - Logger.DebugFormat("MultipartDownloadManager: [Part {0}] GetObject response received - ContentLength={1}", + _logger.DebugFormat("MultipartDownloadManager: [Part {0}] GetObject response received - ContentLength={1}", partNumber, response.ContentLength); // SEP Part GET Step 5 / Ranged GET Step 7: Validate ContentRange matches request ValidateContentRange(response, partNumber, objectSize); - Logger.DebugFormat("MultipartDownloadManager: [Part {0}] ContentRange validation passed", partNumber); + _logger.DebugFormat("MultipartDownloadManager: [Part {0}] ContentRange validation passed", partNumber); // Validate ETag consistency for SEP compliance if (!string.IsNullOrEmpty(_savedETag) && !string.Equals(_savedETag, response.ETag, StringComparison.OrdinalIgnoreCase)) { - Logger.Error(null, "MultipartDownloadManager: [Part {0}] ETag mismatch detected - object modified during download", partNumber); + _logger.Error(null, "MultipartDownloadManager: [Part {0}] ETag mismatch detected - object modified during download", partNumber); throw new InvalidOperationException($"ETag mismatch detected for part {partNumber} - object may have been modified during download"); } - Logger.DebugFormat("MultipartDownloadManager: [Part {0}] ETag validation passed", partNumber); + _logger.DebugFormat("MultipartDownloadManager: [Part {0}] ETag validation passed", partNumber); } finally { _httpConcurrencySlots.Release(); - Logger.DebugFormat("MultipartDownloadManager: [Part {0}] HTTP concurrency slot released (Available: {1}/{2})", + _logger.DebugFormat("MultipartDownloadManager: [Part {0}] HTTP concurrency slot released (Available: {1}/{2})", partNumber, _httpConcurrencySlots.CurrentCount, _config.ConcurrentServiceRequests); } - Logger.DebugFormat("MultipartDownloadManager: [Part {0}] Processing part (handler will decide: stream or buffer)", partNumber); + _logger.DebugFormat("MultipartDownloadManager: [Part {0}] Processing part (handler will decide: stream or buffer)", partNumber); // Delegate data handling to the handler // IMPORTANT: Handler takes ownership of response and is responsible for disposing it in ALL cases: @@ -459,11 +456,11 @@ private async Task CreateDownloadTaskAsync(int partNumber, long objectSize, Even await _dataHandler.ProcessPartAsync(partNumber, response, cancellationToken).ConfigureAwait(false); ownsResponse = false; // Ownership transferred to 
handler - Logger.DebugFormat("MultipartDownloadManager: [Part {0}] Processing completed successfully", partNumber); + _logger.DebugFormat("MultipartDownloadManager: [Part {0}] Processing completed successfully", partNumber); } catch (Exception ex) { - Logger.Error(ex, "MultipartDownloadManager: [Part {0}] Download failed", partNumber); + _logger.Error(ex, "MultipartDownloadManager: [Part {0}] Download failed", partNumber); // Dispose response if we still own it (error occurred before handler took ownership) if (ownsResponse) @@ -487,10 +484,10 @@ private async Task DiscoverUsingPartStrategyAsync(Cance firstPartRequest.PartNumber = 1; // Wait for both capacity types before making HTTP request (consistent with background parts) - Logger.DebugFormat("MultipartDownloadManager: [Part 1 Discovery] Waiting for buffer capacity"); + _logger.DebugFormat("MultipartDownloadManager: [Part 1 Discovery] Waiting for buffer capacity"); await _dataHandler.WaitForCapacityAsync(cancellationToken).ConfigureAwait(false); - Logger.DebugFormat("MultipartDownloadManager: [Part 1 Discovery] Waiting for HTTP concurrency slot"); + _logger.DebugFormat("MultipartDownloadManager: [Part 1 Discovery] Waiting for HTTP concurrency slot"); await _httpConcurrencySlots.WaitAsync(cancellationToken).ConfigureAwait(false); GetObjectResponse firstPartResponse = null; @@ -502,7 +499,7 @@ private async Task DiscoverUsingPartStrategyAsync(Cance finally { _httpConcurrencySlots.Release(); - Logger.DebugFormat("MultipartDownloadManager: [Part 1 Discovery] HTTP concurrency slot released"); + _logger.DebugFormat("MultipartDownloadManager: [Part 1 Discovery] HTTP concurrency slot released"); } if (firstPartResponse == null) @@ -564,10 +561,10 @@ private async Task DiscoverUsingRangeStrategyAsync(Canc firstRangeRequest.ByteRange = new ByteRange(0, targetPartSize - 1); // Wait for both capacity types before making HTTP request (consistent with background parts) - Logger.DebugFormat("MultipartDownloadManager: [Part 1 Discovery] Waiting for buffer capacity"); + _logger.DebugFormat("MultipartDownloadManager: [Part 1 Discovery] Waiting for buffer capacity"); await _dataHandler.WaitForCapacityAsync(cancellationToken).ConfigureAwait(false); - Logger.DebugFormat("MultipartDownloadManager: [Part 1 Discovery] Waiting for HTTP concurrency slot"); + _logger.DebugFormat("MultipartDownloadManager: [Part 1 Discovery] Waiting for HTTP concurrency slot"); await _httpConcurrencySlots.WaitAsync(cancellationToken).ConfigureAwait(false); GetObjectResponse firstRangeResponse = null; @@ -579,7 +576,7 @@ private async Task DiscoverUsingRangeStrategyAsync(Canc finally { _httpConcurrencySlots.Release(); - Logger.DebugFormat("MultipartDownloadManager: [Part 1 Discovery] HTTP concurrency slot released"); + _logger.DebugFormat("MultipartDownloadManager: [Part 1 Discovery] HTTP concurrency slot released"); } // Defensive null check diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartUploadCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartUploadCommand.cs index 9c6374502885..c9d7041aa987 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartUploadCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartUploadCommand.cs @@ -50,13 +50,7 @@ internal partial class MultipartUploadCommand : BaseCommand _partsToUpload = new Queue(); long _contentLength; - private static Logger Logger - { - get - { - return Logger.GetLogger(typeof(TransferUtility)); - } - } + private readonly Logger _logger = 
Logger.GetLogger(typeof(MultipartUploadCommand)); /// /// Initializes a new instance of the class. @@ -70,11 +64,11 @@ internal MultipartUploadCommand(IAmazonS3 s3Client, TransferUtilityConfig config if (fileTransporterRequest.IsSetFilePath()) { - Logger.DebugFormat("Beginning upload of file {0}.", fileTransporterRequest.FilePath); + _logger.DebugFormat("Beginning upload of file {0}.", fileTransporterRequest.FilePath); } else { - Logger.DebugFormat("Beginning upload of stream."); + _logger.DebugFormat("Beginning upload of stream."); } this._s3Client = s3Client; @@ -95,7 +89,7 @@ internal MultipartUploadCommand(IAmazonS3 s3Client, TransferUtilityConfig config } } - Logger.DebugFormat("Upload part size {0}.", this._partSize); + _logger.DebugFormat("Upload part size {0}.", this._partSize); } private static long calculatePartSize(long contentLength, long targetPartSize) diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/TaskHelpers.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/TaskHelpers.cs index 02f233462222..4ca8db0c4fea 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/TaskHelpers.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/TaskHelpers.cs @@ -27,11 +27,6 @@ namespace Amazon.S3.Transfer.Internal /// internal static class TaskHelpers { - private static Logger Logger - { - get { return Logger.GetLogger(typeof(TaskHelpers)); } - } - /// /// Waits for all tasks to complete or till any task fails or is canceled. /// @@ -43,7 +38,7 @@ internal static async Task WhenAllOrFirstExceptionAsync(List pendingTasks, int processed = 0; int total = pendingTasks.Count; - Logger.DebugFormat("TaskHelpers.WhenAllOrFirstExceptionAsync: Starting with TotalTasks={0}", total); + Logger.GetLogger(typeof(TaskHelpers)).DebugFormat("TaskHelpers.WhenAllOrFirstExceptionAsync: Starting with TotalTasks={0}", total); while (processed < total) { @@ -60,11 +55,11 @@ await completedTask pendingTasks.Remove(completedTask); processed++; - Logger.DebugFormat("TaskHelpers.WhenAllOrFirstExceptionAsync: Task completed (Processed={0}/{1}, Remaining={2})", + Logger.GetLogger(typeof(TaskHelpers)).DebugFormat("TaskHelpers.WhenAllOrFirstExceptionAsync: Task completed (Processed={0}/{1}, Remaining={2})", processed, total, pendingTasks.Count); } - Logger.DebugFormat("TaskHelpers.WhenAllOrFirstExceptionAsync: All tasks completed (Total={0})", total); + Logger.GetLogger(typeof(TaskHelpers)).DebugFormat("TaskHelpers.WhenAllOrFirstExceptionAsync: All tasks completed (Total={0})", total); } /// @@ -81,7 +76,7 @@ internal static async Task> WhenAllOrFirstExceptionAsync(List int total = pendingTasks.Count; var responses = new List(); - Logger.DebugFormat("TaskHelpers.WhenAllOrFirstExceptionAsync: Starting with TotalTasks={0}", total); + Logger.GetLogger(typeof(TaskHelpers)).DebugFormat("TaskHelpers.WhenAllOrFirstExceptionAsync: Starting with TotalTasks={0}", total); while (processed < total) { @@ -99,11 +94,11 @@ internal static async Task> WhenAllOrFirstExceptionAsync(List pendingTasks.Remove(completedTask); processed++; - Logger.DebugFormat("TaskHelpers.WhenAllOrFirstExceptionAsync: Task completed (Processed={0}/{1}, Remaining={2})", + Logger.GetLogger(typeof(TaskHelpers)).DebugFormat("TaskHelpers.WhenAllOrFirstExceptionAsync: Task completed (Processed={0}/{1}, Remaining={2})", processed, total, pendingTasks.Count); } - Logger.DebugFormat("TaskHelpers.WhenAllOrFirstExceptionAsync: All tasks completed (Total={0})", total); + Logger.GetLogger(typeof(TaskHelpers)).DebugFormat("TaskHelpers.WhenAllOrFirstExceptionAsync: 
All tasks completed (Total={0})", total); return responses; } @@ -134,11 +129,11 @@ internal static async Task ForEachWithConcurrencyAsync( var itemList = items as IList ?? items.ToList(); if (itemList.Count == 0) { - Logger.DebugFormat("TaskHelpers.ForEachWithConcurrencyAsync: No items to process"); + Logger.GetLogger(typeof(TaskHelpers)).DebugFormat("TaskHelpers.ForEachWithConcurrencyAsync: No items to process"); return; } - Logger.DebugFormat("TaskHelpers.ForEachWithConcurrencyAsync: Starting with TotalItems={0}, MaxConcurrency={1}", + Logger.GetLogger(typeof(TaskHelpers)).DebugFormat("TaskHelpers.ForEachWithConcurrencyAsync: Starting with TotalItems={0}, MaxConcurrency={1}", itemList.Count, maxConcurrency); int nextIndex = 0; @@ -146,7 +141,7 @@ internal static async Task ForEachWithConcurrencyAsync( // Start initial batch up to concurrency limit int initialBatchSize = Math.Min(maxConcurrency, itemList.Count); - Logger.DebugFormat("TaskHelpers.ForEachWithConcurrencyAsync: Starting initial batch of {0} tasks", initialBatchSize); + Logger.GetLogger(typeof(TaskHelpers)).DebugFormat("TaskHelpers.ForEachWithConcurrencyAsync: Starting initial batch of {0} tasks", initialBatchSize); for (int i = 0; i < initialBatchSize; i++) { @@ -170,20 +165,20 @@ await completedTask activeTasks.Remove(completedTask); int itemsCompleted = nextIndex - activeTasks.Count; - Logger.DebugFormat("TaskHelpers.ForEachWithConcurrencyAsync: Task completed (Active={0}, Completed={1}/{2}, Remaining={3})", + Logger.GetLogger(typeof(TaskHelpers)).DebugFormat("TaskHelpers.ForEachWithConcurrencyAsync: Task completed (Active={0}, Completed={1}/{2}, Remaining={3})", activeTasks.Count, itemsCompleted, itemList.Count, itemList.Count - itemsCompleted); // Start next task if more work remains if (nextIndex < itemList.Count) { - Logger.DebugFormat("TaskHelpers.ForEachWithConcurrencyAsync: Starting next task (Index={0}/{1}, Active={2})", + Logger.GetLogger(typeof(TaskHelpers)).DebugFormat("TaskHelpers.ForEachWithConcurrencyAsync: Starting next task (Index={0}/{1}, Active={2})", nextIndex + 1, itemList.Count, activeTasks.Count + 1); var nextTask = processAsync(itemList[nextIndex++], cancellationToken); activeTasks.Add(nextTask); } } - Logger.DebugFormat("TaskHelpers.ForEachWithConcurrencyAsync: All items processed (Total={0})", itemList.Count); + Logger.GetLogger(typeof(TaskHelpers)).DebugFormat("TaskHelpers.ForEachWithConcurrencyAsync: All items processed (Total={0})", itemList.Count); } } } diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartDownloadCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartDownloadCommand.async.cs index 11a4e8ad8f45..cc58ffbbadac 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartDownloadCommand.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartDownloadCommand.async.cs @@ -40,7 +40,7 @@ public override async Task ExecuteAsync(Cancell // Create configuration from request settings var config = CreateConfiguration(); - Logger.DebugFormat("MultipartDownloadCommand: Configuration - ConcurrentServiceRequests={0}, BufferSize={1}, TargetPartSize={2}", + _logger.DebugFormat("MultipartDownloadCommand: Configuration - ConcurrentServiceRequests={0}, BufferSize={1}, TargetPartSize={2}", config.ConcurrentServiceRequests, config.BufferSize, config.TargetPartSizeBytes @@ -63,28 +63,28 @@ public override async Task ExecuteAsync(Cancell try { // Step 1: Discover download strategy (PART or RANGE) and get metadata - 
Logger.DebugFormat("MultipartDownloadCommand: Discovering download strategy"); + _logger.DebugFormat("MultipartDownloadCommand: Discovering download strategy"); var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(cancellationToken) .ConfigureAwait(false); totalBytes = discoveryResult.ObjectSize; - Logger.DebugFormat("MultipartDownloadCommand: Discovered {0} part(s), total size: {1} bytes, IsSinglePart={2}", + _logger.DebugFormat("MultipartDownloadCommand: Discovered {0} part(s), total size: {1} bytes, IsSinglePart={2}", discoveryResult.TotalParts, discoveryResult.ObjectSize, discoveryResult.IsSinglePart); // Step 2: Start concurrent downloads for all parts - Logger.DebugFormat("Starting downloads for {0} part(s)", discoveryResult.TotalParts); + _logger.DebugFormat("Starting downloads for {0} part(s)", discoveryResult.TotalParts); await coordinator.StartDownloadsAsync(discoveryResult, DownloadPartProgressEventCallback, cancellationToken) .ConfigureAwait(false); // Step 2b: Wait for all downloads to complete before returning // This ensures file is fully written and committed for file-based downloads // For stream-based downloads, this task completes immediately (no-op) - Logger.DebugFormat("MultipartDownloadCommand: Waiting for download completion"); + _logger.DebugFormat("MultipartDownloadCommand: Waiting for download completion"); await coordinator.DownloadCompletionTask.ConfigureAwait(false); - Logger.DebugFormat("MultipartDownloadCommand: Completed multipart download"); + _logger.DebugFormat("MultipartDownloadCommand: Completed multipart download"); // Step 3: Map the response from the initial GetObject response // The initial response contains all the metadata we need @@ -126,7 +126,7 @@ await coordinator.StartDownloadsAsync(discoveryResult, DownloadPartProgressEvent } catch (Exception ex) { - Logger.Error(ex, "Exception during multipart download"); + _logger.Error(ex, "Exception during multipart download"); // Fire failed event FireTransferFailedEvent(totalBytes); diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartUploadCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartUploadCommand.async.cs index 0ea2f205258a..9c6983f9bdc0 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartUploadCommand.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartUploadCommand.async.cs @@ -50,7 +50,7 @@ public override async Task ExecuteAsync(Cancellat var initRequest = ConstructInitiateMultipartUploadRequest(); initResponse = await _s3Client.InitiateMultipartUploadAsync(initRequest, cancellationToken) .ConfigureAwait(continueOnCapturedContext: false); - Logger.DebugFormat("Initiated upload: {0}", initResponse.UploadId); + _logger.DebugFormat("Initiated upload: {0}", initResponse.UploadId); } catch (Exception) { @@ -64,7 +64,7 @@ public override async Task ExecuteAsync(Cancellat try { - Logger.DebugFormat("Queue up the UploadPartRequests to be executed"); + _logger.DebugFormat("Queue up the UploadPartRequests to be executed"); long filePosition = 0; for (int i = 1; filePosition < this._contentLength; i++) { @@ -100,7 +100,7 @@ public override async Task ExecuteAsync(Cancellat this._totalNumberOfParts = this._partsToUpload.Count; - Logger.DebugFormat("Scheduling the {0} UploadPartRequests in the queue", this._totalNumberOfParts); + _logger.DebugFormat("Scheduling the {0} UploadPartRequests in the queue", this._totalNumberOfParts); internalCts = 
CancellationTokenSource.CreateLinkedTokenSource(cancellationToken); var concurrencyLevel = CalculateConcurrentServiceRequests(); @@ -132,15 +132,15 @@ await localThrottler.WaitAsync(cancellationToken) pendingUploadPartTasks.Add(task); } - Logger.DebugFormat("Waiting for upload part requests to complete. ({0})", initResponse.UploadId); + _logger.DebugFormat("Waiting for upload part requests to complete. ({0})", initResponse.UploadId); _uploadResponses = await TaskHelpers.WhenAllOrFirstExceptionAsync(pendingUploadPartTasks, cancellationToken) .ConfigureAwait(continueOnCapturedContext: false); - Logger.DebugFormat("Beginning completing multipart. ({0})", initResponse.UploadId); + _logger.DebugFormat("Beginning completing multipart. ({0})", initResponse.UploadId); var compRequest = ConstructCompleteMultipartUploadRequest(initResponse); var completeResponse = await this._s3Client.CompleteMultipartUploadAsync(compRequest, cancellationToken) .ConfigureAwait(continueOnCapturedContext: false); - Logger.DebugFormat("Done completing multipart. ({0})", initResponse.UploadId); + _logger.DebugFormat("Done completing multipart. ({0})", initResponse.UploadId); var mappedResponse = ResponseMapper.MapCompleteMultipartUploadResponse(completeResponse); FireTransferCompletedEvent(mappedResponse); @@ -148,7 +148,7 @@ await localThrottler.WaitAsync(cancellationToken) } catch (Exception e) { - Logger.Error(e, "Exception while uploading. ({0})", initResponse?.UploadId ?? "unknown"); + _logger.Error(e, "Exception while uploading. ({0})", initResponse?.UploadId ?? "unknown"); FireTransferFailedEvent(); @@ -246,7 +246,7 @@ private void Cleanup(string uploadId, List> tasks) } catch(Exception exception) { - Logger.InfoFormat( + _logger.InfoFormat( "A timeout occurred while waiting for all upload part requests to complete as part of aborting the multipart upload: {0}", exception.Message); } @@ -273,7 +273,7 @@ private void AbortMultipartUpload(string uploadId) } catch (Exception e) { - Logger.InfoFormat("Error attempting to abort multipart for key {0}: {1}", this._fileTransporterRequest.Key, e.Message); + _logger.InfoFormat("Error attempting to abort multipart for key {0}: {1}", this._fileTransporterRequest.Key, e.Message); } } private async Task UploadUnseekableStreamAsync(TransferUtilityUploadRequest request, CancellationToken cancellationToken = default(CancellationToken)) @@ -302,7 +302,7 @@ private void AbortMultipartUpload(string uploadId) catch (Exception ex) { FireTransferFailedEvent(); - Logger.Error(ex, "Failed to initiate multipart upload for unseekable stream"); + _logger.Error(ex, "Failed to initiate multipart upload for unseekable stream"); throw; } @@ -351,7 +351,7 @@ private void AbortMultipartUpload(string uploadId) UploadPartRequest uploadPartRequest = ConstructUploadPartRequestForNonSeekableStream(nextUploadBuffer, partNumber, partSize, isLastPart, initiateResponse); var partResponse = await _s3Client.UploadPartAsync(uploadPartRequest, cancellationToken).ConfigureAwait(false); - Logger.DebugFormat("Uploaded part {0}. (Last part = {1}, Part size = {2}, Upload Id: {3})", partNumber, isLastPart, partSize, initiateResponse.UploadId); + _logger.DebugFormat("Uploaded part {0}. 
(Last part = {1}, Part size = {2}, Upload Id: {3})", partNumber, isLastPart, partSize, initiateResponse.UploadId); uploadPartResponses.Add(partResponse); partNumber++; @@ -373,7 +373,7 @@ private void AbortMultipartUpload(string uploadId) this._uploadResponses = uploadPartResponses; CompleteMultipartUploadRequest compRequest = ConstructCompleteMultipartUploadRequest(initiateResponse, true, requestEventHandler); var completeResponse = await _s3Client.CompleteMultipartUploadAsync(compRequest, cancellationToken).ConfigureAwait(false); - Logger.DebugFormat("Completed multi part upload. (Part count: {0}, Upload Id: {1})", uploadPartResponses.Count, initiateResponse.UploadId); + _logger.DebugFormat("Completed multipart upload. (Part count: {0}, Upload Id: {1})", uploadPartResponses.Count, initiateResponse.UploadId); var mappedResponse = ResponseMapper.MapCompleteMultipartUploadResponse(completeResponse); FireTransferCompletedEvent(mappedResponse); @@ -391,7 +391,7 @@ await _s3Client.AbortMultipartUploadAsync(new AbortMultipartUploadRequest() { RequestPayer = request.RequestPayer, UploadId = initiateResponse.UploadId }).ConfigureAwait(false); - Logger.Error(ex, ex.Message); + _logger.Error(ex, ex.Message); throw; } } diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/OpenStreamWithResponseCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/OpenStreamWithResponseCommand.async.cs index 8a72d6b87164..3d4d3e197acc 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/OpenStreamWithResponseCommand.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/OpenStreamWithResponseCommand.async.cs @@ -27,17 +27,14 @@ namespace Amazon.S3.Transfer.Internal { internal partial class OpenStreamWithResponseCommand : BaseCommand { - private Logger Logger - { - get { return Logger.GetLogger(typeof(TransferUtility)); } - } + private readonly Logger _logger = Logger.GetLogger(typeof(OpenStreamWithResponseCommand)); public override async Task ExecuteAsync(CancellationToken cancellationToken) { - Logger.DebugFormat("OpenStreamWithResponseCommand: Creating BufferedMultipartStream with MultipartDownloadType={0}", + _logger.DebugFormat("OpenStreamWithResponseCommand: Creating BufferedMultipartStream with MultipartDownloadType={0}", _request.MultipartDownloadType); - Logger.DebugFormat("OpenStreamWithResponseCommand: Configuration - ConcurrentServiceRequests={0}, MaxInMemoryParts={1}, BufferSize={2}", + _logger.DebugFormat("OpenStreamWithResponseCommand: Configuration - ConcurrentServiceRequests={0}, MaxInMemoryParts={1}, BufferSize={2}", _config.ConcurrentServiceRequests, _request.MaxInMemoryParts, _s3Client.Config.BufferSize @@ -49,7 +46,7 @@ public override async Task ExecuteAsync(Cance // Populate metadata from the initial GetObject response (from discovery phase) var discoveryResult = bufferedStream.DiscoveryResult; - Logger.DebugFormat("OpenStreamWithResponseCommand: Stream initialized successfully - ObjectSize={0}, TotalParts={1}, IsSinglePart={2}", + _logger.DebugFormat("OpenStreamWithResponseCommand: Stream initialized successfully - ObjectSize={0}, TotalParts={1}, IsSinglePart={2}", discoveryResult.ObjectSize, discoveryResult.TotalParts, discoveryResult.IsSinglePart); diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs index 3e426198ff0a..85cb94ac4662 100644 --- 
a/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs @@ -35,10 +35,7 @@ internal partial class DownloadDirectoryCommand : BaseCommand ExecuteAsyn { FireTransferInitiatedEvent(); - Logger.DebugFormat("DownloadDirectoryCommand.ExecuteAsync: Starting - DownloadFilesConcurrently={0}, UseMultipartDownload={1}, ConcurrentServiceRequests={2}", + _logger.DebugFormat("DownloadDirectoryCommand.ExecuteAsync: Starting - DownloadFilesConcurrently={0}, UseMultipartDownload={1}, ConcurrentServiceRequests={2}", DownloadFilesConcurrently, this._useMultipartDownload, this._config.ConcurrentServiceRequests); // Step 1: Validate and setup @@ -64,7 +61,7 @@ public override async Task ExecuteAsyn .ConfigureAwait(false); this._totalNumberOfFilesToDownload = s3Objects.Count; - Logger.DebugFormat("DownloadDirectoryCommand.ExecuteAsync: Found {0} total objects, TotalBytes={1}", + _logger.DebugFormat("DownloadDirectoryCommand.ExecuteAsync: Found {0} total objects, TotalBytes={1}", s3Objects.Count, this._totalBytes); // Step 3: Filter to actual files (exclude directory markers) @@ -82,7 +79,7 @@ await ExecuteParallelDownloadsAsync( } // Step 5: Build response - Logger.DebugFormat("DownloadDirectoryCommand.ExecuteAsync: Completed - ObjectsDownloaded={0}, ObjectsFailed={1}", + _logger.DebugFormat("DownloadDirectoryCommand.ExecuteAsync: Completed - ObjectsDownloaded={0}, ObjectsFailed={1}", _numberOfFilesDownloaded, _errors.Count); var response = BuildResponse(); @@ -162,7 +159,7 @@ private List FilterObjectsToDownload(List s3Objects) .Where(s3o => !s3o.Key.EndsWith("/", StringComparison.Ordinal)) .ToList(); - Logger.DebugFormat("DownloadDirectoryCommand.FilterObjectsToDownload: Filtered to {0} files to download (excluded {1} directory markers)", + _logger.DebugFormat("DownloadDirectoryCommand.FilterObjectsToDownload: Filtered to {0} files to download (excluded {1} directory markers)", filtered.Count, s3Objects.Count - filtered.Count); return filtered; @@ -191,7 +188,7 @@ private DownloadResources CreateDownloadResources(CancellationToken cancellation if (this._useMultipartDownload) { httpRequestThrottler = new SemaphoreSlim(this._config.ConcurrentServiceRequests); - Logger.DebugFormat("DownloadDirectoryCommand.CreateDownloadResources: Created HTTP throttler with MaxConcurrentRequests={0}", + _logger.DebugFormat("DownloadDirectoryCommand.CreateDownloadResources: Created HTTP throttler with MaxConcurrentRequests={0}", this._config.ConcurrentServiceRequests); } @@ -214,7 +211,7 @@ private async Task ExecuteParallelDownloadsAsync( ? 
this._config.ConcurrentServiceRequests : 1; - Logger.DebugFormat("DownloadDirectoryCommand.ExecuteParallelDownloadsAsync: Starting task pool with ConcurrencyLevel={0}, TotalFiles={1}", + _logger.DebugFormat("DownloadDirectoryCommand.ExecuteParallelDownloadsAsync: Starting task pool with ConcurrencyLevel={0}, TotalFiles={1}", concurrencyLevel, objectsToDownload.Count); await TaskHelpers.ForEachWithConcurrencyAsync( @@ -234,7 +231,7 @@ await DownloadSingleFileAsync( cancellationToken) .ConfigureAwait(false); - Logger.DebugFormat("DownloadDirectoryCommand.ExecuteParallelDownloadsAsync: Task pool completed - ObjectsDownloaded={0}, ObjectsFailed={1}", + _logger.DebugFormat("DownloadDirectoryCommand.ExecuteParallelDownloadsAsync: Task pool completed - ObjectsDownloaded={0}, ObjectsFailed={1}", _numberOfFilesDownloaded, _errors.Count); } @@ -341,7 +338,7 @@ private TransferUtilityDownloadDirectoryResponse BuildResponse() private async Task> GetS3ObjectsToDownloadAsync(ListObjectsRequest listRequest, CancellationToken cancellationToken) { - Logger.DebugFormat("DownloadDirectoryCommand.GetS3ObjectsToDownloadAsync: Starting object listing"); + _logger.DebugFormat("DownloadDirectoryCommand.GetS3ObjectsToDownloadAsync: Starting object listing"); List objs = new List(); int pageCount = 0; @@ -364,11 +361,11 @@ private async Task> GetS3ObjectsToDownloadAsync(ListObjectsReques listRequest.Marker = listResponse.NextMarker; pageCount++; - Logger.DebugFormat("DownloadDirectoryCommand.GetS3ObjectsToDownloadAsync: Page {0} completed - ObjectsInPage={1}, TotalObjectsSoFar={2}", + _logger.DebugFormat("DownloadDirectoryCommand.GetS3ObjectsToDownloadAsync: Page {0} completed - ObjectsInPage={1}, TotalObjectsSoFar={2}", pageCount, listResponse.S3Objects?.Count ?? 0, objs.Count); } while (!string.IsNullOrEmpty(listRequest.Marker)); - Logger.DebugFormat("DownloadDirectoryCommand.GetS3ObjectsToDownloadAsync: Listing completed - TotalPages={0}, TotalObjects={1}", + _logger.DebugFormat("DownloadDirectoryCommand.GetS3ObjectsToDownloadAsync: Listing completed - TotalPages={0}, TotalObjects={1}", pageCount, objs.Count); return objs; @@ -376,7 +373,7 @@ private async Task> GetS3ObjectsToDownloadAsync(ListObjectsReques private async Task> GetS3ObjectsToDownloadV2Async(ListObjectsV2Request listRequestV2, CancellationToken cancellationToken) { - Logger.DebugFormat("DownloadDirectoryCommand.GetS3ObjectsToDownloadV2Async: Starting object listing (V2 API)"); + _logger.DebugFormat("DownloadDirectoryCommand.GetS3ObjectsToDownloadV2Async: Starting object listing (V2 API)"); List objs = new List(); int pageCount = 0; @@ -399,11 +396,11 @@ private async Task> GetS3ObjectsToDownloadV2Async(ListObjectsV2Re listRequestV2.ContinuationToken = listResponse.NextContinuationToken; pageCount++; - Logger.DebugFormat("DownloadDirectoryCommand.GetS3ObjectsToDownloadV2Async: Page {0} completed - ObjectsInPage={1}, TotalObjectsSoFar={2}", + _logger.DebugFormat("DownloadDirectoryCommand.GetS3ObjectsToDownloadV2Async: Page {0} completed - ObjectsInPage={1}, TotalObjectsSoFar={2}", pageCount, listResponse.S3Objects?.Count ?? 
0, objs.Count); } while (!string.IsNullOrEmpty(listRequestV2.ContinuationToken)); - Logger.DebugFormat("DownloadDirectoryCommand.GetS3ObjectsToDownloadV2Async: Listing completed - TotalPages={0}, TotalObjects={1}", + _logger.DebugFormat("DownloadDirectoryCommand.GetS3ObjectsToDownloadV2Async: Listing completed - TotalPages={0}, TotalObjects={1}", pageCount, objs.Count); return objs; diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtility.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtility.cs index f4dde2c232e2..d9cf6863e791 100644 --- a/sdk/src/Services/S3/Custom/Transfer/TransferUtility.cs +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtility.cs @@ -71,14 +71,7 @@ public partial class TransferUtility : ITransferUtility { "s3-object-lambda" }; - private static Logger Logger - { - get - { - - return Logger.GetLogger(typeof(ITransferUtility)); - } - } + private readonly Logger _logger = Logger.GetLogger(typeof(TransferUtility)); #region Constructors /// From 4c8c7aaa3dd5eaaebc6c22836874e4d3c69ffd88 Mon Sep 17 00:00:00 2001 From: Garrett Beatty Date: Wed, 3 Dec 2025 13:32:55 -0500 Subject: [PATCH 42/56] Create UploadDirectoryWithResponse api + progress tracking events + update docs and sync exception handling (#4187) --- .../9d07dc1e-d82d-4f94-8700-c7b57f872044.json | 11 + .../9d07dc1e-d82d-4f94-8700-c7b57f872124.json | 11 + .../Internal/UploadDirectoryCommand.cs | 38 +- .../UploadDirectoryCommand.cs | 65 +- .../TransferUtilityUploadDirectoryRequest.cs | 205 ++++++ .../ITransferUtility.async.cs | 88 +++ .../_bcl+netstandard/ITransferUtility.sync.cs | 79 ++ .../_bcl+netstandard/TransferUtility.async.cs | 164 +---- .../_bcl+netstandard/TransferUtility.sync.cs | 118 +-- ...ferUtilityUploadDirectoryLifecycleTests.cs | 309 ++++++++ ...UtilityUploadDirectoryWithResponseTests.cs | 672 ++++++++++++++++++ 11 files changed, 1512 insertions(+), 248 deletions(-) create mode 100644 generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872044.json create mode 100644 generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872124.json create mode 100644 sdk/test/Services/S3/IntegrationTests/TransferUtilityUploadDirectoryLifecycleTests.cs create mode 100644 sdk/test/Services/S3/IntegrationTests/TransferUtilityUploadDirectoryWithResponseTests.cs diff --git a/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872044.json b/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872044.json new file mode 100644 index 000000000000..b2cafff31230 --- /dev/null +++ b/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872044.json @@ -0,0 +1,11 @@ +{ + "services": [ + { + "serviceName": "S3", + "type": "minor", + "changeLogMessages": [ + "Created new UploadDirectoryWithResponseAsync method on the Amazon.S3.Transfer.TransferUtility class." + ] + } + ] +} \ No newline at end of file diff --git a/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872124.json b/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872124.json new file mode 100644 index 000000000000..d5508da3272f --- /dev/null +++ b/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872124.json @@ -0,0 +1,11 @@ +{ + "services": [ + { + "serviceName": "S3", + "type": "minor", + "changeLogMessages": [ + "Added UploadDirectoryInitiatedEvent, UploadDirectoryCompletedEvent, and UploadDirectoryFailedEvent for Amazon.S3.Transfer.TransferUtility.UploadDirectory." 
+ ] + } + ] +} \ No newline at end of file diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/UploadDirectoryCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/UploadDirectoryCommand.cs index 148e34798d47..b6c884a8361f 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/UploadDirectoryCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/UploadDirectoryCommand.cs @@ -45,7 +45,43 @@ internal partial class UploadDirectoryCommand : BaseCommand ExecuteAsync(CancellationToken cancellationToken) { - // Step 1: Setup paths and discover files - string prefix = GetKeyPrefix(); - string basePath = new DirectoryInfo(this._request.Directory).FullName; + try + { + // Step 1: Setup paths and discover files + string prefix = GetKeyPrefix(); + string basePath = new DirectoryInfo(this._request.Directory).FullName; - _logger.DebugFormat("UploadDirectoryCommand.ExecuteAsync: Starting - BasePath={0}, Prefix={1}, UploadFilesConcurrently={2}, ConcurrentServiceRequests={3}", - basePath, prefix, UploadFilesConcurrently, this._config.ConcurrentServiceRequests); + _logger.DebugFormat("UploadDirectoryCommand.ExecuteAsync: Starting - BasePath={0}, Prefix={1}, UploadFilesConcurrently={2}, ConcurrentServiceRequests={3}", + basePath, prefix, UploadFilesConcurrently, this._config.ConcurrentServiceRequests); - // Step 2: Discover files to upload - string[] filePaths = await DiscoverFilesAsync(basePath, cancellationToken) - .ConfigureAwait(false); + // Step 2: Discover files to upload + string[] filePaths = await DiscoverFilesAsync(basePath, cancellationToken) + .ConfigureAwait(false); - this._totalNumberOfFiles = filePaths.Length; - _logger.DebugFormat("UploadDirectoryCommand.ExecuteAsync: Discovered {0} file(s) to upload. TotalBytes={1}", - _totalNumberOfFiles, _totalBytes); + this._totalNumberOfFiles = filePaths.Length; + _logger.DebugFormat("UploadDirectoryCommand.ExecuteAsync: Discovered {0} file(s) to upload. 
TotalBytes={1}", + _totalNumberOfFiles, _totalBytes); - // Step 3: Setup resources and execute uploads - using (var resources = CreateUploadResources(cancellationToken)) - { - await ExecuteParallelUploadsAsync( - filePaths, - basePath, - prefix, - resources, - cancellationToken) - .ConfigureAwait(false); - } + FireTransferInitiatedEvent(); - // Step 4: Build and return response - _logger.DebugFormat("UploadDirectoryCommand.ExecuteAsync: Completed - FilesSuccessfullyUploaded={0}, FilesFailed={1}", - _numberOfFilesSuccessfullyUploaded, _errors.Count); - return BuildResponse(); + // Step 3: Setup resources and execute uploads + using (var resources = CreateUploadResources(cancellationToken)) + { + await ExecuteParallelUploadsAsync( + filePaths, + basePath, + prefix, + resources, + cancellationToken) + .ConfigureAwait(false); + } + + // Step 4: Build and return response + _logger.DebugFormat("UploadDirectoryCommand.ExecuteAsync: Completed - FilesSuccessfullyUploaded={0}, FilesFailed={1}", + _numberOfFilesSuccessfullyUploaded, _errors.Count); + + var response = BuildResponse(); + FireTransferCompletedEvent(response); + return response; + } + catch + { + FireTransferFailedEvent(); + throw; + } } /// diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadDirectoryRequest.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadDirectoryRequest.cs index 004e83d1f81d..802d544ef86c 100644 --- a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadDirectoryRequest.cs +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadDirectoryRequest.cs @@ -55,6 +55,33 @@ public FailurePolicy FailurePolicy set { this.failurePolicy = value; } } + /// + /// Occurs when the upload directory operation is initiated. + /// + /// + /// This event is raised before any files are uploaded, providing information about + /// the total number of files and bytes that will be uploaded. + /// + public event EventHandler UploadDirectoryInitiatedEvent; + + /// + /// Occurs when the upload directory operation completes successfully. + /// + /// + /// This event is raised after all files have been processed (successfully or with failures), + /// providing the final response and statistics. + /// + public event EventHandler UploadDirectoryCompletedEvent; + + /// + /// Occurs when the upload directory operation fails. + /// + /// + /// This event is raised when the entire operation fails (not individual file failures). + /// Individual file failures are reported through . + /// + public event EventHandler UploadDirectoryFailedEvent; + /// /// Occurs when an individual object fails to upload during an UploadDirectory operation. /// @@ -72,6 +99,33 @@ public FailurePolicy FailurePolicy /// public event EventHandler ObjectUploadFailedEvent; + /// + /// Internal helper used by the transfer implementation to raise the . + /// + /// The event args. + internal void OnRaiseUploadDirectoryInitiatedEvent(UploadDirectoryInitiatedEventArgs args) + { + UploadDirectoryInitiatedEvent?.Invoke(this, args); + } + + /// + /// Internal helper used by the transfer implementation to raise the . + /// + /// The event args. + internal void OnRaiseUploadDirectoryCompletedEvent(UploadDirectoryCompletedEventArgs args) + { + UploadDirectoryCompletedEvent?.Invoke(this, args); + } + + /// + /// Internal helper used by the transfer implementation to raise the . + /// + /// The event args. 
+ internal void OnRaiseUploadDirectoryFailedEvent(UploadDirectoryFailedEventArgs args) + { + UploadDirectoryFailedEvent?.Invoke(this, args); + } + /// /// Internal helper used by the transfer implementation to raise the . /// @@ -421,6 +475,157 @@ public UploadDirectoryFileRequestArgs(TransferUtilityUploadRequest request) public TransferUtilityUploadRequest UploadRequest { get; set; } } + /// + /// Provides data for . + /// + public class UploadDirectoryInitiatedEventArgs : EventArgs + { + /// + /// Initializes a new instance of the class. + /// + /// The upload directory request. + /// The total number of files to upload. + /// The total number of bytes to upload. + internal UploadDirectoryInitiatedEventArgs( + TransferUtilityUploadDirectoryRequest request, + long totalFiles, + long totalBytes) + { + Request = request; + TotalFiles = totalFiles; + TotalBytes = totalBytes; + } + + /// + /// Gets the upload directory request. + /// + public TransferUtilityUploadDirectoryRequest Request { get; private set; } + + /// + /// Gets the total number of files to upload. + /// + public long TotalFiles { get; private set; } + + /// + /// Gets the total number of bytes to upload. + /// + public long TotalBytes { get; private set; } + } + + /// + /// Provides data for . + /// + public class UploadDirectoryCompletedEventArgs : EventArgs + { + /// + /// Initializes a new instance of the class. + /// + /// The upload directory request. + /// The upload directory response. + /// The number of files successfully uploaded. + /// The total number of files attempted. + /// The number of bytes transferred. + /// The total number of bytes. + internal UploadDirectoryCompletedEventArgs( + TransferUtilityUploadDirectoryRequest request, + TransferUtilityUploadDirectoryResponse response, + long transferredFiles, + long totalFiles, + long transferredBytes, + long totalBytes) + { + Request = request; + Response = response; + TransferredFiles = transferredFiles; + TotalFiles = totalFiles; + TransferredBytes = transferredBytes; + TotalBytes = totalBytes; + } + + /// + /// Gets the upload directory request. + /// + public TransferUtilityUploadDirectoryRequest Request { get; private set; } + + /// + /// Gets the upload directory response. + /// + public TransferUtilityUploadDirectoryResponse Response { get; private set; } + + /// + /// Gets the number of files successfully uploaded. + /// + public long TransferredFiles { get; private set; } + + /// + /// Gets the total number of files attempted. + /// + public long TotalFiles { get; private set; } + + /// + /// Gets the number of bytes transferred. + /// + public long TransferredBytes { get; private set; } + + /// + /// Gets the total number of bytes. + /// + public long TotalBytes { get; private set; } + } + + /// + /// Provides data for . + /// + public class UploadDirectoryFailedEventArgs : EventArgs + { + /// + /// Initializes a new instance of the class. + /// + /// The upload directory request. + /// The number of files successfully uploaded before failure. + /// The total number of files attempted. + /// The number of bytes transferred before failure. + /// The total number of bytes. + internal UploadDirectoryFailedEventArgs( + TransferUtilityUploadDirectoryRequest request, + long transferredFiles, + long totalFiles, + long transferredBytes, + long totalBytes) + { + Request = request; + TransferredFiles = transferredFiles; + TotalFiles = totalFiles; + TransferredBytes = transferredBytes; + TotalBytes = totalBytes; + } + + /// + /// Gets the upload directory request. 
+ /// + public TransferUtilityUploadDirectoryRequest Request { get; private set; } + + /// + /// Gets the number of files successfully uploaded before failure. + /// + public long TransferredFiles { get; private set; } + + /// + /// Gets the total number of files attempted. + /// + public long TotalFiles { get; private set; } + + /// + /// Gets the number of bytes transferred before failure. + /// + public long TransferredBytes { get; private set; } + + /// + /// Gets the total number of bytes. + /// + public long TotalBytes { get; private set; } + } + /// /// Provides data for /// which is raised when an individual object fails to upload during an diff --git a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.async.cs b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.async.cs index e11731050c43..a25a43c8b5a5 100644 --- a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.async.cs @@ -145,6 +145,94 @@ public partial interface ITransferUtility /// The task object representing the asynchronous operation. Task UploadDirectoryAsync(TransferUtilityUploadDirectoryRequest request, CancellationToken cancellationToken = default(CancellationToken)); + /// + /// Uploads files from a specified directory and returns response metadata. + /// The object key is derived from the file names inside the directory. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. + /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploadsAsync() to abort the incomplete multipart uploads. + /// + /// + /// + /// The source directory, that is, the directory containing the files to upload. + /// + /// + /// The target Amazon S3 bucket, that is, the name of the bucket to upload the files to. + /// + /// + /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. + /// + /// The task object representing the asynchronous operation with upload response metadata. + Task UploadDirectoryWithResponseAsync(string directory, string bucketName, CancellationToken cancellationToken = default(CancellationToken)); + + /// + /// Uploads files from a specified directory and returns response metadata. + /// The object key is derived from the file names inside the directory. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. + /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. 
In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploadsAsync() to abort the incomplete multipart uploads. + /// + /// + /// + /// The source directory, that is, the directory containing the files to upload. + /// + /// + /// The target Amazon S3 bucket, that is, the name of the bucket to upload the files to. + /// + /// + /// A pattern used to identify the files from the source directory to upload. + /// + /// + /// A search option that specifies whether to recursively search for files to upload + /// in subdirectories. + /// + /// + /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. + /// + /// The task object representing the asynchronous operation with upload response metadata. + Task UploadDirectoryWithResponseAsync(string directory, string bucketName, string searchPattern, SearchOption searchOption, CancellationToken cancellationToken = default(CancellationToken)); + + /// + /// Uploads files from a specified directory and returns response metadata. + /// The object key is derived from the file names inside the directory. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. + /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploadsAsync() to abort the incomplete multipart uploads. + /// + /// + /// + /// The request that contains all the parameters required to upload a directory. + /// + /// + /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. + /// + /// The task object representing the asynchronous operation with upload response metadata. + Task UploadDirectoryWithResponseAsync(TransferUtilityUploadDirectoryRequest request, CancellationToken cancellationToken = default(CancellationToken)); + #endregion #region DownloadDirectory diff --git a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.sync.cs b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.sync.cs index 979fc54daf9f..b984fa70b800 100644 --- a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.sync.cs +++ b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.sync.cs @@ -110,6 +110,85 @@ public partial interface ITransferUtility /// The request that contains all the parameters required to upload a directory. /// void UploadDirectory(TransferUtilityUploadDirectoryRequest request); + + /// + /// Uploads files from a specified directory and returns response metadata. + /// The object key is derived from the file names inside the directory. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. 
+ /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. + /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. + /// + /// + /// + /// The source directory, that is, the directory containing the files to upload. + /// + /// + /// The target Amazon S3 bucket, that is, the name of the bucket to upload the files to. + /// + /// Response metadata including the number of objects uploaded and any errors encountered. + TransferUtilityUploadDirectoryResponse UploadDirectoryWithResponse(string directory, string bucketName); + + /// + /// Uploads files from a specified directory and returns response metadata. + /// The object key is derived from the file names inside the directory. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. + /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. + /// + /// + /// + /// The source directory, that is, the directory containing the files to upload. + /// + /// + /// The target Amazon S3 bucket, that is, the name of the bucket to upload the files to. + /// + /// + /// A pattern used to identify the files from the source directory to upload. + /// + /// + /// A search option that specifies whether to recursively search for files to upload + /// in subdirectories. + /// + /// Response metadata including the number of objects uploaded and any errors encountered. + TransferUtilityUploadDirectoryResponse UploadDirectoryWithResponse(string directory, string bucketName, string searchPattern, SearchOption searchOption); + + /// + /// Uploads files from a specified directory and returns response metadata. + /// The object key is derived from the file names inside the directory. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. + /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. + /// + /// + /// + /// The request that contains all the parameters required to upload a directory. + /// + /// Response metadata including the number of objects uploaded and any errors encountered. 
+ TransferUtilityUploadDirectoryResponse UploadDirectoryWithResponse(TransferUtilityUploadDirectoryRequest request); #endregion #region Upload diff --git a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.async.cs b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.async.cs index 81d2d4b43351..6cda6c5c6194 100644 --- a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.async.cs @@ -54,103 +54,21 @@ public partial class TransferUtility : ITransferUtility { #region UploadDirectory - /// - /// Uploads files from a specified directory. - /// The object key is derived from the file names - /// inside the directory. - /// For large uploads, the file will be divided and uploaded in parts using - /// Amazon S3's multipart API. The parts will be reassembled as one object in - /// Amazon S3. - /// - /// - /// - /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. - /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. - /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able - /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, - /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. - /// - /// - /// - /// The source directory, that is, the directory containing the files to upload. - /// - /// - /// The target Amazon S3 bucket, that is, the name of the bucket to upload the files to. - /// - /// - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// - /// The task object representing the asynchronous operation. + /// public async Task UploadDirectoryAsync(string directory, string bucketName, CancellationToken cancellationToken = default(CancellationToken)) { var request = ConstructUploadDirectoryRequest(directory, bucketName); await UploadDirectoryAsync(request, cancellationToken).ConfigureAwait(false); } - /// - /// Uploads files from a specified directory. - /// The object key is derived from the file names - /// inside the directory. - /// For large uploads, the file will be divided and uploaded in parts using - /// Amazon S3's multipart API. The parts will be reassembled as one object in - /// Amazon S3. - /// - /// - /// - /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. - /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. - /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able - /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, - /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. - /// - /// - /// - /// The source directory, that is, the directory containing the files to upload. - /// - /// - /// The target Amazon S3 bucket, that is, the name of the bucket to upload the files to. - /// - /// - /// A pattern used to identify the files from the source directory to upload. - /// - /// - /// A search option that specifies whether to recursively search for files to upload - /// in subdirectories. 
- /// </param> - /// <param name="cancellationToken"> - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// </param> - /// <returns>The task object representing the asynchronous operation.</returns> + /// <inheritdoc/> public async Task UploadDirectoryAsync(string directory, string bucketName, string searchPattern, SearchOption searchOption, CancellationToken cancellationToken = default(CancellationToken)) { var request = ConstructUploadDirectoryRequest(directory, bucketName, searchPattern, searchOption); await UploadDirectoryAsync(request, cancellationToken).ConfigureAwait(false); } - /// <summary> - /// Uploads files from a specified directory. - /// The object key is derived from the file names - /// inside the directory. - /// For large uploads, the file will be divided and uploaded in parts using - /// Amazon S3's multipart API. The parts will be reassembled as one object in - /// Amazon S3. - /// </summary> - /// <remarks> - /// <para> - /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. - /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. - /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able - /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, - /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. - /// </para> - /// </remarks> - /// <param name="request"> - /// The request that contains all the parameters required to upload a directory. - /// </param> - /// <param name="cancellationToken"> - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// </param> - /// <returns>The task object representing the asynchronous operation.</returns> + /// <inheritdoc/> public async Task UploadDirectoryAsync(TransferUtilityUploadDirectoryRequest request, CancellationToken cancellationToken = default(CancellationToken)) { using(CreateSpan(nameof(UploadDirectoryAsync), null, Amazon.Runtime.Telemetry.Tracing.SpanKind.CLIENT)) @@ -163,45 +81,44 @@ public partial class TransferUtility : ITransferUtility } } + /// <inheritdoc/> + public async Task<TransferUtilityUploadDirectoryResponse> UploadDirectoryWithResponseAsync(string directory, string bucketName, CancellationToken cancellationToken = default(CancellationToken)) + { + var request = ConstructUploadDirectoryRequest(directory, bucketName); + return await UploadDirectoryWithResponseAsync(request, cancellationToken).ConfigureAwait(false); + } + + /// <inheritdoc/> + public async Task<TransferUtilityUploadDirectoryResponse> UploadDirectoryWithResponseAsync(string directory, string bucketName, string searchPattern, SearchOption searchOption, CancellationToken cancellationToken = default(CancellationToken)) + { + var request = ConstructUploadDirectoryRequest(directory, bucketName, searchPattern, searchOption); + return await UploadDirectoryWithResponseAsync(request, cancellationToken).ConfigureAwait(false); + } + + /// <inheritdoc/> + public async Task<TransferUtilityUploadDirectoryResponse> UploadDirectoryWithResponseAsync(TransferUtilityUploadDirectoryRequest request, CancellationToken cancellationToken = default(CancellationToken)) + { + using(CreateSpan(nameof(UploadDirectoryWithResponseAsync), null, Amazon.Runtime.Telemetry.Tracing.SpanKind.CLIENT)) + { + CheckForBlockedArn(request.BucketName, "UploadDirectory"); + validate(request); + UploadDirectoryCommand command = new UploadDirectoryCommand(this, this._config, request); + command.UploadFilesConcurrently = request.UploadFilesConcurrently; + return await command.ExecuteAsync(cancellationToken).ConfigureAwait(false); + } + } + + #endregion + + #region DownloadDirectory - /// <summary> - /// Downloads the objects in Amazon S3
that have a key that starts with the value - /// specified by s3Directory. - /// - /// - /// The name of the bucket containing the Amazon S3 objects to download. - /// - /// - /// The directory in Amazon S3 to download. - /// - /// - /// The local directory to download the objects to. - /// - /// - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// - /// The task object representing the asynchronous operation. + /// public async Task DownloadDirectoryAsync(string bucketName, string s3Directory, string localDirectory, CancellationToken cancellationToken = default(CancellationToken)) { var request = ConstructDownloadDirectoryRequest(bucketName, s3Directory, localDirectory); await DownloadDirectoryAsync(request, cancellationToken).ConfigureAwait(false); } - /// - /// Downloads the objects in Amazon S3 that have a key that starts with the value - /// specified by the S3Directory - /// property of the passed in TransferUtilityDownloadDirectoryRequest object. - /// - /// - /// Contains all the parameters required to download objects from Amazon S3 - /// into a local directory. - /// - /// - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// - /// The task object representing the asynchronous operation. + /// public async Task DownloadDirectoryAsync(TransferUtilityDownloadDirectoryRequest request, CancellationToken cancellationToken = default(CancellationToken)) { using(CreateSpan(nameof(DownloadDirectoryAsync), null, Amazon.Runtime.Telemetry.Tracing.SpanKind.CLIENT)) @@ -215,22 +132,7 @@ public partial class TransferUtility : ITransferUtility #endregion #region Download - /// - /// Downloads the content from Amazon S3 and writes it to the specified file. - /// - /// - /// The file path where the content from Amazon S3 will be written to. - /// - /// - /// The name of the bucket containing the Amazon S3 object to download. - /// - /// - /// The key under which the Amazon S3 object is stored. - /// - /// - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// - /// The task object representing the asynchronous operation. 
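// --------------------------------------------------------------------
// Illustrative usage sketch (not part of the patch): a minimal example of
// how a caller might consume the new UploadDirectoryWithResponseAsync API
// added above. The bucket name, local path, and key prefix below are
// hypothetical placeholders; the response properties (ObjectsUploaded,
// ObjectsFailed, Result) are the ones exercised by the integration tests
// later in this patch series.
//
// var transferUtility = new TransferUtility(new AmazonS3Client());
// var uploadRequest = new TransferUtilityUploadDirectoryRequest
// {
//     BucketName = "amzn-s3-demo-bucket",          // hypothetical bucket
//     Directory = @"C:\data\to-upload",            // hypothetical local path
//     KeyPrefix = "backups",
//     SearchPattern = "*",
//     SearchOption = SearchOption.AllDirectories,
//     UploadFilesConcurrently = true
// };
//
// TransferUtilityUploadDirectoryResponse uploadResponse =
//     await transferUtility.UploadDirectoryWithResponseAsync(uploadRequest);
//
// Console.WriteLine($"Uploaded {uploadResponse.ObjectsUploaded} objects " +
//                   $"({uploadResponse.ObjectsFailed} failed, result: {uploadResponse.Result})");
// --------------------------------------------------------------------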
+ /// public async Task DownloadAsync(string filePath, string bucketName, string key, CancellationToken cancellationToken = default(CancellationToken)) { var request = ConstructDownloadRequest(filePath, bucketName, key); diff --git a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.sync.cs b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.sync.cs index 457360ccd3f7..05f5c2cac349 100644 --- a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.sync.cs +++ b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.sync.cs @@ -70,6 +70,24 @@ public void UploadDirectory(TransferUtilityUploadDirectoryRequest request) ExceptionDispatchInfo.Capture(e.InnerException).Throw(); } } + + /// + public TransferUtilityUploadDirectoryResponse UploadDirectoryWithResponse(string directory, string bucketName) + { + return UploadDirectoryWithResponseAsync(directory, bucketName).GetAwaiter().GetResult(); + } + + /// + public TransferUtilityUploadDirectoryResponse UploadDirectoryWithResponse(string directory, string bucketName, string searchPattern, SearchOption searchOption) + { + return UploadDirectoryWithResponseAsync(directory, bucketName, searchPattern, searchOption).GetAwaiter().GetResult(); + } + + /// + public TransferUtilityUploadDirectoryResponse UploadDirectoryWithResponse(TransferUtilityUploadDirectoryRequest request) + { + return UploadDirectoryWithResponseAsync(request).GetAwaiter().GetResult(); + } #endregion #region Upload @@ -130,57 +148,25 @@ public void Upload(TransferUtilityUploadRequest request) /// public TransferUtilityUploadResponse UploadWithResponse(string filePath, string bucketName) { - try - { - return UploadWithResponseAsync(filePath, bucketName).Result; - } - catch (AggregateException e) - { - ExceptionDispatchInfo.Capture(e.InnerException).Throw(); - return null; - } + return UploadWithResponseAsync(filePath, bucketName).GetAwaiter().GetResult(); } /// public TransferUtilityUploadResponse UploadWithResponse(string filePath, string bucketName, string key) { - try - { - return UploadWithResponseAsync(filePath, bucketName, key).Result; - } - catch (AggregateException e) - { - ExceptionDispatchInfo.Capture(e.InnerException).Throw(); - return null; - } + return UploadWithResponseAsync(filePath, bucketName, key).GetAwaiter().GetResult(); } /// public TransferUtilityUploadResponse UploadWithResponse(Stream stream, string bucketName, string key) { - try - { - return UploadWithResponseAsync(stream, bucketName, key).Result; - } - catch (AggregateException e) - { - ExceptionDispatchInfo.Capture(e.InnerException).Throw(); - return null; - } + return UploadWithResponseAsync(stream, bucketName, key).GetAwaiter().GetResult(); } /// public TransferUtilityUploadResponse UploadWithResponse(TransferUtilityUploadRequest request) { - try - { - return UploadWithResponseAsync(request).Result; - } - catch (AggregateException e) - { - ExceptionDispatchInfo.Capture(e.InnerException).Throw(); - return null; - } + return UploadWithResponseAsync(request).GetAwaiter().GetResult(); } #endregion @@ -218,29 +204,13 @@ public Stream OpenStream(TransferUtilityOpenStreamRequest request) /// public TransferUtilityOpenStreamResponse OpenStreamWithResponse(string bucketName, string key) { - try - { - return OpenStreamWithResponseAsync(bucketName, key).Result; - } - catch (AggregateException e) - { - ExceptionDispatchInfo.Capture(e.InnerException).Throw(); - return null; - } + return OpenStreamWithResponseAsync(bucketName, 
key).GetAwaiter().GetResult(); } /// public TransferUtilityOpenStreamResponse OpenStreamWithResponse(TransferUtilityOpenStreamRequest request) { - try - { - return OpenStreamWithResponseAsync(request).Result; - } - catch (AggregateException e) - { - ExceptionDispatchInfo.Capture(e.InnerException).Throw(); - return null; - } + return OpenStreamWithResponseAsync(request).GetAwaiter().GetResult(); } #endregion @@ -275,29 +245,13 @@ public void Download(TransferUtilityDownloadRequest request) /// public TransferUtilityDownloadResponse DownloadWithResponse(string filePath, string bucketName, string key) { - try - { - return DownloadWithResponseAsync(filePath, bucketName, key).Result; - } - catch (AggregateException e) - { - ExceptionDispatchInfo.Capture(e.InnerException).Throw(); - return null; - } + return DownloadWithResponseAsync(filePath, bucketName, key).GetAwaiter().GetResult(); } /// public TransferUtilityDownloadResponse DownloadWithResponse(TransferUtilityDownloadRequest request) { - try - { - return DownloadWithResponseAsync(request).Result; - } - catch (AggregateException e) - { - ExceptionDispatchInfo.Capture(e.InnerException).Throw(); - return null; - } + return DownloadWithResponseAsync(request).GetAwaiter().GetResult(); } #endregion @@ -331,29 +285,13 @@ public void DownloadDirectory(TransferUtilityDownloadDirectoryRequest request) /// public TransferUtilityDownloadDirectoryResponse DownloadDirectoryWithResponse(string bucketName, string s3Directory, string localDirectory) { - try - { - return DownloadDirectoryWithResponseAsync(bucketName, s3Directory, localDirectory).Result; - } - catch (AggregateException e) - { - ExceptionDispatchInfo.Capture(e.InnerException).Throw(); - return null; - } + return DownloadDirectoryWithResponseAsync(bucketName, s3Directory, localDirectory).GetAwaiter().GetResult(); } /// public TransferUtilityDownloadDirectoryResponse DownloadDirectoryWithResponse(TransferUtilityDownloadDirectoryRequest request) { - try - { - return DownloadDirectoryWithResponseAsync(request).Result; - } - catch (AggregateException e) - { - ExceptionDispatchInfo.Capture(e.InnerException).Throw(); - return null; - } + return DownloadDirectoryWithResponseAsync(request).GetAwaiter().GetResult(); } #endregion diff --git a/sdk/test/Services/S3/IntegrationTests/TransferUtilityUploadDirectoryLifecycleTests.cs b/sdk/test/Services/S3/IntegrationTests/TransferUtilityUploadDirectoryLifecycleTests.cs new file mode 100644 index 000000000000..6e3c70d7eb0d --- /dev/null +++ b/sdk/test/Services/S3/IntegrationTests/TransferUtilityUploadDirectoryLifecycleTests.cs @@ -0,0 +1,309 @@ +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Threading; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Amazon.S3; +using Amazon.S3.Model; +using Amazon.S3.Transfer; +using Amazon.S3.Util; +using Amazon.S3.Transfer.Model; +using AWSSDK_DotNet.IntegrationTests.Utils; + +namespace AWSSDK_DotNet.IntegrationTests.Tests.S3 +{ + /// + /// Integration tests for TransferUtility upload directory lifecycle events. + /// Tests the initiated, completed, and failed events for directory uploads. 
+ /// + [TestClass] + public class TransferUtilityUploadDirectoryLifecycleTests : TestBase + { + public static readonly long MEG_SIZE = (int)Math.Pow(2, 20); + public static readonly long KILO_SIZE = (int)Math.Pow(2, 10); + public static readonly string BasePath = Path.Combine(Path.GetTempPath(), "transferutility", "uploaddirectorylifecycle"); + + private static string bucketName; + private static string plainTextContentType = "text/plain"; + + [ClassInitialize()] + public static void ClassInitialize(TestContext a) + { + bucketName = S3TestUtils.CreateBucketWithWait(Client); + } + + [ClassCleanup] + public static void ClassCleanup() + { + AmazonS3Util.DeleteS3BucketWithObjects(Client, bucketName); + BaseClean(); + if (Directory.Exists(BasePath)) + { + Directory.Delete(BasePath, true); + } + } + + [TestMethod] + [TestCategory("S3")] + public void UploadDirectoryInitiatedEventTest() + { + var eventValidator = new TransferLifecycleEventValidator<UploadDirectoryInitiatedEventArgs> + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.IsNotNull(args.Request.BucketName); + Assert.IsNotNull(args.Request.Directory); + + // Verify that total files and bytes are provided in initiated event + Assert.IsTrue(args.TotalFiles > 0, "TotalFiles should be greater than 0"); + Assert.IsTrue(args.TotalBytes > 0, "TotalBytes should be greater than 0"); + + } + }; + UploadDirectoryWithLifecycleEvents(10 * MEG_SIZE, eventValidator, null, null); + eventValidator.AssertEventFired(); + } + + [TestMethod] + [TestCategory("S3")] + public void UploadDirectoryCompletedEventTest() + { + var eventValidator = new TransferLifecycleEventValidator<UploadDirectoryCompletedEventArgs> + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.IsNotNull(args.Response); + + // Verify progress information is available in completed event + Assert.IsTrue(args.TotalFiles > 0, "TotalFiles should be greater than 0"); + Assert.AreEqual(args.TransferredFiles, args.TotalFiles, "All files should be transferred"); + Assert.IsTrue(args.TotalBytes > 0, "TotalBytes should be greater than 0"); + Assert.AreEqual(args.TransferredBytes, args.TotalBytes, "All bytes should be transferred"); + + // Verify response contains expected data + Assert.AreEqual(args.TransferredFiles, args.Response.ObjectsUploaded, "Response ObjectsUploaded should match TransferredFiles"); + Assert.AreEqual(0, args.Response.ObjectsFailed, "No objects should have failed"); + Assert.AreEqual(DirectoryResult.Success, args.Response.Result, "Result should be Success"); + + } + }; + UploadDirectoryWithLifecycleEvents(12 * MEG_SIZE, null, eventValidator, null); + eventValidator.AssertEventFired(); + } + + [TestMethod] + [TestCategory("S3")] + public void UploadDirectoryFailedEventTest() + { + var eventValidator = new TransferLifecycleEventValidator<UploadDirectoryFailedEventArgs> + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + } + }; + + // Use an invalid bucket name to force a real exception + // Bucket names with uppercase letters are invalid and will cause an exception + var invalidBucketName = "INVALID-BUCKET-NAME-" + Guid.NewGuid().ToString(); + + var directory = CreateTestDirectory(5 * MEG_SIZE); + var directoryPath = directory.FullName; + + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityUploadDirectoryRequest + { + BucketName = invalidBucketName, // This will cause an exception due to invalid bucket name + Directory = directoryPath, + KeyPrefix = "test-prefix", + SearchPattern = "*", + SearchOption = SearchOption.AllDirectories + }; + + request.UploadDirectoryFailedEvent += eventValidator.OnEventFired; + + try + { + transferUtility.UploadDirectory(request); + Assert.Fail("Expected an exception to be thrown for invalid bucket name"); + } + catch (Exception ex) + { + // Expected exception - the failed event should have been fired + Console.WriteLine($"Expected exception caught: {ex.GetType().Name} - {ex.Message}"); + } + + eventValidator.AssertEventFired(); + } + + [TestMethod] + [TestCategory("S3")] + public void UploadDirectoryCompleteLifecycleTest() + { + var initiatedValidator = new TransferLifecycleEventValidator<UploadDirectoryInitiatedEventArgs> + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.AreEqual(bucketName, args.Request.BucketName); + Assert.IsNotNull(args.Request.Directory); + Assert.IsTrue(args.TotalFiles > 0); + Assert.IsTrue(args.TotalBytes > 0); + } + }; + + var completedValidator = new TransferLifecycleEventValidator<UploadDirectoryCompletedEventArgs> + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.IsNotNull(args.Response); + Assert.AreEqual(args.TransferredFiles, args.TotalFiles); + Assert.AreEqual(args.TransferredBytes, args.TotalBytes); + Assert.IsTrue(args.TotalFiles > 0, "Should have uploaded at least one file"); + Assert.AreEqual(DirectoryResult.Success, args.Response.Result); + } + }; + + UploadDirectoryWithLifecycleEvents(15 * MEG_SIZE, initiatedValidator, completedValidator, null); + + initiatedValidator.AssertEventFired(); + completedValidator.AssertEventFired(); + } + + #region Helper Methods + + void UploadDirectoryWithLifecycleEvents(long fileSize, + TransferLifecycleEventValidator<UploadDirectoryInitiatedEventArgs> initiatedValidator, + TransferLifecycleEventValidator<UploadDirectoryCompletedEventArgs> completedValidator, + TransferLifecycleEventValidator<UploadDirectoryFailedEventArgs> failedValidator) + { + var directory = CreateTestDirectory(fileSize); + var keyPrefix = directory.Name; + var directoryPath = directory.FullName; + + UploadDirectoryWithLifecycleEventsAndDirectory(directoryPath, keyPrefix, initiatedValidator, completedValidator, failedValidator); + } + + void UploadDirectoryWithLifecycleEventsAndDirectory(string directoryPath, string keyPrefix, + TransferLifecycleEventValidator<UploadDirectoryInitiatedEventArgs> initiatedValidator, + TransferLifecycleEventValidator<UploadDirectoryCompletedEventArgs> completedValidator, + TransferLifecycleEventValidator<UploadDirectoryFailedEventArgs> failedValidator) + { + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityUploadDirectoryRequest + { + BucketName = bucketName, + Directory = directoryPath, + KeyPrefix = keyPrefix, + ContentType = plainTextContentType, + SearchPattern = "*", + SearchOption = SearchOption.AllDirectories + }; + + if (initiatedValidator != null) + { + request.UploadDirectoryInitiatedEvent += initiatedValidator.OnEventFired; + } + + if (completedValidator != null) + { + request.UploadDirectoryCompletedEvent += completedValidator.OnEventFired; + } + + if (failedValidator != null) + { + request.UploadDirectoryFailedEvent += failedValidator.OnEventFired; + } + + transferUtility.UploadDirectory(request); + + // Validate uploaded directory contents if it was successful + var directory = new DirectoryInfo(directoryPath); + ValidateDirectoryContentsInS3(Client, bucketName, keyPrefix, directory); + } + + public static DirectoryInfo CreateTestDirectory(long fileSize = 0, int numberOfTestFiles = 3) + { + if (fileSize == 0) + fileSize = 1 * MEG_SIZE; + + var directoryPath = GenerateDirectoryPath(); + for (int i = 0; i < numberOfTestFiles; i++) + { + var filePath = Path.Combine(Path.Combine(directoryPath, i.ToString()), "file.txt"); + UtilityMethods.GenerateFile(filePath, fileSize); + } + + return new DirectoryInfo(directoryPath); + } + + public static string GenerateDirectoryPath(string baseName = "UploadDirectoryLifecycleTest") + { + var directoryName = UtilityMethods.GenerateName(baseName); + var directoryPath = Path.Combine(BasePath, directoryName); + return directoryPath; + } + + public static void ValidateDirectoryContentsInS3(IAmazonS3 s3client, string bucketName, string keyPrefix, DirectoryInfo sourceDirectory) + { + var directoryPath = sourceDirectory.FullName; + var files = sourceDirectory.GetFiles("*", SearchOption.AllDirectories); + foreach (var file in files) + { + var filePath = file.FullName; + var relativePath = filePath.Substring(directoryPath.Length + 1); + var key = (!string.IsNullOrEmpty(keyPrefix) ? keyPrefix + "/" : string.Empty) + relativePath.Replace("\\", "/"); + + // Verify the object exists in S3 + var metadata = s3client.GetObjectMetadata(new GetObjectMetadataRequest + { + BucketName = bucketName, + Key = key + }); + Assert.IsNotNull(metadata, $"Object {key} should exist in S3"); + Console.WriteLine($"Validated object exists in S3: {key}"); + } + } + + #endregion + + #region Shared Helper Classes + + class TransferLifecycleEventValidator<T> + { + public Action<T> Validate { get; set; } + public bool EventFired { get; private set; } + public Exception EventException { get; private set; } + + public void OnEventFired(object sender, T eventArgs) + { + try + { + Console.WriteLine("Lifecycle Event Fired: {0}", typeof(T).Name); + Validate?.Invoke(eventArgs); + EventFired = true; // Only set if validation passes + } + catch (Exception ex) + { + EventException = ex; + EventFired = false; // Ensure we don't mark as fired on failure + Console.WriteLine("Exception caught in lifecycle event: {0}", ex.Message); + // Don't re-throw, let AssertEventFired() handle it + } + } + + public void AssertEventFired() + { + if (EventException != null) + throw EventException; + Assert.IsTrue(EventFired, $"{typeof(T).Name} event was not fired"); + } + } + + #endregion + } +} diff --git a/sdk/test/Services/S3/IntegrationTests/TransferUtilityUploadDirectoryWithResponseTests.cs b/sdk/test/Services/S3/IntegrationTests/TransferUtilityUploadDirectoryWithResponseTests.cs new file mode 100644 index 000000000000..dee59bc11a5b --- /dev/null +++ b/sdk/test/Services/S3/IntegrationTests/TransferUtilityUploadDirectoryWithResponseTests.cs @@ -0,0 +1,672 @@ +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Threading.Tasks; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Amazon.S3; +using Amazon.S3.Model; +using Amazon.S3.Transfer; +using Amazon.S3.Transfer.Model; +using Amazon.S3.Util; +using AWSSDK_DotNet.IntegrationTests.Utils; + +namespace AWSSDK_DotNet.IntegrationTests.Tests.S3 +{ + /// + /// Integration tests for TransferUtility.UploadDirectoryWithResponseAsync functionality. + /// These tests verify end-to-end functionality with actual S3 operations and directory I/O.
+ /// + /// These integration tests focus on: + /// - Basic directory uploads with response object + /// - Progress tracking with response + /// - Multipart uploads in directory context + /// - Concurrent vs sequential uploads + /// - Nested directory structures + /// - Response validation + /// + [TestClass] + public class TransferUtilityUploadDirectoryWithResponseTests : TestBase + { + private static readonly long MB = 1024 * 1024; + private static readonly long KB = 1024; + private static string bucketName; + private static string tempDirectory; + + [ClassInitialize()] + public static void ClassInitialize(TestContext testContext) + { + bucketName = S3TestUtils.CreateBucketWithWait(Client); + tempDirectory = Path.Combine(Path.GetTempPath(), "S3UploadDirectoryTests-" + Guid.NewGuid().ToString()); + Directory.CreateDirectory(tempDirectory); + } + + [ClassCleanup] + public static void ClassCleanup() + { + AmazonS3Util.DeleteS3BucketWithObjects(Client, bucketName); + + // Clean up temp directory + if (Directory.Exists(tempDirectory)) + { + try + { + Directory.Delete(tempDirectory, recursive: true); + } + catch + { + // Best effort cleanup + } + } + + BaseClean(); + } + + [TestCleanup] + public void TestCleanup() + { + // Clean up any test directories after each test + if (Directory.Exists(tempDirectory)) + { + foreach (var subDir in Directory.GetDirectories(tempDirectory)) + { + try + { + Directory.Delete(subDir, recursive: true); + } + catch + { + // Best effort cleanup + } + } + } + } + + #region Basic Upload Tests + + [TestMethod] + [TestCategory("S3")] + [TestCategory("UploadDirectory")] + public async Task UploadDirectoryWithResponse_BasicUpload_ReturnsCorrectResponse() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("basic-upload"); + var uploadPath = Path.Combine(tempDirectory, keyPrefix + "-upload"); + var fileCount = 5; + + CreateLocalTestDirectory(uploadPath, 2 * MB, fileCount); + + // Act + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityUploadDirectoryRequest + { + BucketName = bucketName, + Directory = uploadPath, + KeyPrefix = keyPrefix, + SearchPattern = "*", + SearchOption = SearchOption.AllDirectories + }; + + var response = await transferUtility.UploadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsNotNull(response, "Response should not be null"); + Assert.AreEqual(fileCount, response.ObjectsUploaded, "ObjectsUploaded should match file count"); + Assert.AreEqual(0, response.ObjectsFailed, "ObjectsFailed should be 0"); + Assert.AreEqual(DirectoryResult.Success, response.Result, "Result should be Success"); + + // Verify all files were uploaded to S3 + await VerifyObjectsInS3(keyPrefix, fileCount); + } + + [TestMethod] + [TestCategory("S3")] + [TestCategory("UploadDirectory")] + public async Task UploadDirectoryWithResponse_EmptyDirectory_ReturnsZeroObjectsUploaded() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("empty-directory"); + var uploadPath = Path.Combine(tempDirectory, keyPrefix + "-upload"); + Directory.CreateDirectory(uploadPath); + + // Act - Upload empty directory + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityUploadDirectoryRequest + { + BucketName = bucketName, + Directory = uploadPath, + KeyPrefix = keyPrefix + }; + + var response = await transferUtility.UploadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsNotNull(response, "Response should not be null"); + Assert.AreEqual(0, response.ObjectsUploaded, "ObjectsUploaded 
should be 0 for empty directory"); + Assert.AreEqual(0, response.ObjectsFailed, "ObjectsFailed should be 0"); + Assert.AreEqual(DirectoryResult.Success, response.Result, "Result should be Success"); + } + + #endregion + + #region Progress Tracking Tests + + [TestMethod] + [TestCategory("S3")] + [TestCategory("UploadDirectory")] + public async Task UploadDirectoryWithResponse_WithProgressTracking_FiresProgressEvents() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("progress-tracking"); + var uploadPath = Path.Combine(tempDirectory, keyPrefix + "-upload"); + var fileCount = 3; + + CreateLocalTestDirectory(uploadPath, 5 * MB, fileCount); + + var progressEvents = new List(); + var progressLock = new object(); + + // Act + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityUploadDirectoryRequest + { + BucketName = bucketName, + Directory = uploadPath, + KeyPrefix = keyPrefix, + SearchPattern = "*", + SearchOption = SearchOption.AllDirectories + }; + + request.UploadDirectoryProgressEvent += (sender, args) => + { + lock (progressLock) + { + progressEvents.Add(args); + } + }; + + var response = await transferUtility.UploadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsNotNull(response, "Response should not be null"); + Assert.AreEqual(fileCount, response.ObjectsUploaded); + Assert.IsTrue(progressEvents.Count > 0, "Progress events should have fired"); + + // Verify final progress event + var finalEvent = progressEvents.Last(); + Assert.AreEqual(fileCount, finalEvent.NumberOfFilesUploaded); + Assert.AreEqual(fileCount, finalEvent.TotalNumberOfFiles); + Assert.AreEqual(finalEvent.TransferredBytes, finalEvent.TotalBytes); + } + + [TestMethod] + [TestCategory("S3")] + [TestCategory("UploadDirectory")] + public async Task UploadDirectoryWithResponse_WithLifecycleEvents_FiresInitiatedAndCompleted() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("lifecycle-events"); + var uploadPath = Path.Combine(tempDirectory, keyPrefix + "-upload"); + var fileCount = 3; + + CreateLocalTestDirectory(uploadPath, 2 * MB, fileCount); + + bool initiatedFired = false; + bool completedFired = false; + UploadDirectoryInitiatedEventArgs initiatedArgs = null; + UploadDirectoryCompletedEventArgs completedArgs = null; + + // Act + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityUploadDirectoryRequest + { + BucketName = bucketName, + Directory = uploadPath, + KeyPrefix = keyPrefix, + SearchPattern = "*", + SearchOption = SearchOption.AllDirectories + }; + + request.UploadDirectoryInitiatedEvent += (sender, args) => + { + initiatedFired = true; + initiatedArgs = args; + }; + + request.UploadDirectoryCompletedEvent += (sender, args) => + { + completedFired = true; + completedArgs = args; + }; + + var response = await transferUtility.UploadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsTrue(initiatedFired, "Initiated event should have fired"); + Assert.IsTrue(completedFired, "Completed event should have fired"); + + Assert.IsNotNull(initiatedArgs); + Assert.AreEqual(fileCount, initiatedArgs.TotalFiles); + Assert.IsTrue(initiatedArgs.TotalBytes > 0); + + Assert.IsNotNull(completedArgs); + Assert.AreEqual(fileCount, completedArgs.TransferredFiles); + Assert.AreEqual(fileCount, completedArgs.TotalFiles); + Assert.AreEqual(completedArgs.Response, response); + } + + [TestMethod] + [TestCategory("S3")] + [TestCategory("UploadDirectory")] + public async Task 
UploadDirectoryWithResponse_SequentialMode_IncludesCurrentFileDetails() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("sequential-progress"); + var uploadPath = Path.Combine(tempDirectory, keyPrefix + "-upload"); + + CreateLocalTestDirectory(uploadPath, 3 * MB, 3); + + var progressEvents = new List(); + + // Act + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityUploadDirectoryRequest + { + BucketName = bucketName, + Directory = uploadPath, + KeyPrefix = keyPrefix, + SearchPattern = "*", + SearchOption = SearchOption.AllDirectories, + UploadFilesConcurrently = false // Sequential mode + }; + + request.UploadDirectoryProgressEvent += (sender, args) => + { + progressEvents.Add(args); + }; + + var response = await transferUtility.UploadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual(3, response.ObjectsUploaded); + + // In sequential mode, should have CurrentFile populated + var eventsWithFile = progressEvents.Where(e => e.CurrentFile != null).ToList(); + Assert.IsTrue(eventsWithFile.Count > 0, "Should have events with CurrentFile populated"); + + foreach (var evt in eventsWithFile) + { + Assert.IsNotNull(evt.CurrentFile); + Assert.IsTrue(evt.TotalNumberOfBytesForCurrentFile > 0); + } + } + + [TestMethod] + [TestCategory("S3")] + [TestCategory("UploadDirectory")] + public async Task UploadDirectoryWithResponse_ConcurrentMode_OmitsCurrentFileDetails() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("concurrent-progress"); + var uploadPath = Path.Combine(tempDirectory, keyPrefix + "-upload"); + + CreateLocalTestDirectory(uploadPath, 3 * MB, 4); + + var progressEvents = new List(); + var progressLock = new object(); + + // Act + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityUploadDirectoryRequest + { + BucketName = bucketName, + Directory = uploadPath, + KeyPrefix = keyPrefix, + SearchPattern = "*", + SearchOption = SearchOption.AllDirectories, + UploadFilesConcurrently = true // Concurrent mode + }; + + request.UploadDirectoryProgressEvent += (sender, args) => + { + lock (progressLock) + { + progressEvents.Add(args); + } + }; + + var response = await transferUtility.UploadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual(4, response.ObjectsUploaded); + Assert.IsTrue(progressEvents.Count > 0); + + // In concurrent mode, CurrentFile should be null + foreach (var evt in progressEvents) + { + Assert.IsNull(evt.CurrentFile, "CurrentFile should be null in concurrent mode"); + Assert.AreEqual(0, evt.TransferredBytesForCurrentFile); + Assert.AreEqual(0, evt.TotalNumberOfBytesForCurrentFile); + } + } + + #endregion + + #region Multipart Upload Tests + + [TestMethod] + [TestCategory("S3")] + [TestCategory("UploadDirectory")] + [TestCategory("Multipart")] + public async Task UploadDirectoryWithResponse_WithMultipartFiles_UploadsSuccessfully() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("multipart-directory"); + var uploadPath = Path.Combine(tempDirectory, keyPrefix + "-upload"); + var fileCount = 3; + + // Create directory with large files to trigger multipart (>16MB threshold) + CreateLocalTestDirectory(uploadPath, 20 * MB, fileCount); + + // Act + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityUploadDirectoryRequest + { + BucketName = bucketName, + Directory = uploadPath, + KeyPrefix = keyPrefix, + SearchPattern = "*", + SearchOption = 
SearchOption.AllDirectories + }; + + var response = await transferUtility.UploadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual(fileCount, response.ObjectsUploaded); + Assert.AreEqual(0, response.ObjectsFailed); + Assert.AreEqual(DirectoryResult.Success, response.Result); + + // Verify all files uploaded with correct sizes + await VerifyObjectsInS3WithSize(keyPrefix, fileCount, 20 * MB); + } + + #endregion + + #region Nested Directory Tests + + [TestMethod] + [TestCategory("S3")] + [TestCategory("UploadDirectory")] + public async Task UploadDirectoryWithResponse_NestedDirectories_PreservesStructure() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("nested-structure"); + var uploadPath = Path.Combine(tempDirectory, keyPrefix + "-upload"); + + // Create nested directory structure + var nestedFiles = new Dictionary + { + { "level1/file1.txt", 1 * MB }, + { "level1/level2/file2.txt", 2 * MB }, + { "level1/level2/level3/file3.txt", 3 * MB } + }; + + CreateLocalTestDirectoryWithStructure(uploadPath, nestedFiles); + + // Act + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityUploadDirectoryRequest + { + BucketName = bucketName, + Directory = uploadPath, + KeyPrefix = keyPrefix, + SearchPattern = "*", + SearchOption = SearchOption.AllDirectories + }; + + var response = await transferUtility.UploadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual(nestedFiles.Count, response.ObjectsUploaded); + Assert.AreEqual(0, response.ObjectsFailed); + + // Verify S3 keys have proper structure + foreach (var kvp in nestedFiles) + { + var expectedKey = keyPrefix + "/" + kvp.Key.Replace('\\', '/'); + await VerifyObjectExistsInS3(expectedKey, kvp.Value); + } + } + + #endregion + + #region Concurrent vs Sequential Tests + + [TestMethod] + [TestCategory("S3")] + [TestCategory("UploadDirectory")] + public async Task UploadDirectoryWithResponse_ConcurrentMode_UploadsAllFiles() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("concurrent-upload"); + var uploadPath = Path.Combine(tempDirectory, keyPrefix + "-upload"); + var fileCount = 10; + + CreateLocalTestDirectory(uploadPath, 2 * MB, fileCount); + + // Act + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityUploadDirectoryRequest + { + BucketName = bucketName, + Directory = uploadPath, + KeyPrefix = keyPrefix, + SearchPattern = "*", + SearchOption = SearchOption.AllDirectories, + UploadFilesConcurrently = true + }; + + var response = await transferUtility.UploadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual(fileCount, response.ObjectsUploaded); + Assert.AreEqual(0, response.ObjectsFailed); + + await VerifyObjectsInS3(keyPrefix, fileCount); + } + + [TestMethod] + [TestCategory("S3")] + [TestCategory("UploadDirectory")] + public async Task UploadDirectoryWithResponse_SequentialMode_UploadsAllFiles() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("sequential-upload"); + var uploadPath = Path.Combine(tempDirectory, keyPrefix + "-upload"); + var fileCount = 5; + + CreateLocalTestDirectory(uploadPath, 3 * MB, fileCount); + + // Act + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityUploadDirectoryRequest + { + BucketName = bucketName, + Directory = uploadPath, + KeyPrefix = keyPrefix, + SearchPattern = "*", + SearchOption = SearchOption.AllDirectories, + 
UploadFilesConcurrently = false + }; + + var response = await transferUtility.UploadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual(fileCount, response.ObjectsUploaded); + Assert.AreEqual(0, response.ObjectsFailed); + + await VerifyObjectsInS3(keyPrefix, fileCount); + } + + #endregion + + #region Mixed File Size Tests + + [TestMethod] + [TestCategory("S3")] + [TestCategory("UploadDirectory")] + public async Task UploadDirectoryWithResponse_MixedFileSizes_UploadsAll() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("mixed-sizes"); + var uploadPath = Path.Combine(tempDirectory, keyPrefix + "-upload"); + + var mixedFiles = new Dictionary + { + { "tiny.txt", 100 }, // 100 bytes + { "small.txt", 512 * KB }, // 512 KB + { "medium.txt", 5 * MB }, // 5 MB + { "large.txt", 20 * MB } // 20 MB (multipart) + }; + + CreateLocalTestDirectoryWithStructure(uploadPath, mixedFiles); + + // Act + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityUploadDirectoryRequest + { + BucketName = bucketName, + Directory = uploadPath, + KeyPrefix = keyPrefix, + SearchPattern = "*", + SearchOption = SearchOption.AllDirectories + }; + + var response = await transferUtility.UploadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual(mixedFiles.Count, response.ObjectsUploaded); + Assert.AreEqual(0, response.ObjectsFailed); + + // Verify each file's size in S3 + foreach (var kvp in mixedFiles) + { + var s3Key = keyPrefix + "/" + kvp.Key; + await VerifyObjectExistsInS3(s3Key, kvp.Value); + } + } + + #endregion + + #region Helper Methods + + /// + /// Creates a local test directory with specified number of files. + /// + private static void CreateLocalTestDirectory(string directoryPath, long fileSize, int fileCount) + { + Directory.CreateDirectory(directoryPath); + + for (int i = 0; i < fileCount; i++) + { + var fileName = $"file{i}.dat"; + var filePath = Path.Combine(directoryPath, fileName); + UtilityMethods.GenerateFile(filePath, fileSize); + } + } + + /// + /// Creates a local test directory with specific file structure. + /// + private static void CreateLocalTestDirectoryWithStructure(string directoryPath, Dictionary files) + { + foreach (var kvp in files) + { + var filePath = Path.Combine(directoryPath, kvp.Key.Replace('/', Path.DirectorySeparatorChar)); + var directory = Path.GetDirectoryName(filePath); + + if (!string.IsNullOrEmpty(directory)) + { + Directory.CreateDirectory(directory); + } + + UtilityMethods.GenerateFile(filePath, kvp.Value); + } + } + + /// + /// Verifies that the expected number of objects exist in S3 under the given prefix. + /// + private static async Task VerifyObjectsInS3(string keyPrefix, int expectedCount) + { + var listRequest = new ListObjectsV2Request + { + BucketName = bucketName, + Prefix = keyPrefix + "/" + }; + + var listResponse = await Client.ListObjectsV2Async(listRequest); + + // Filter out directory markers + var actualObjects = listResponse.S3Objects + .Where(s3o => !s3o.Key.EndsWith("/", StringComparison.Ordinal)) + .ToList(); + + Assert.AreEqual(expectedCount, actualObjects.Count, + $"Expected {expectedCount} objects in S3 under prefix '{keyPrefix}', found {actualObjects.Count}"); + } + + /// + /// Verifies that the expected number of objects exist in S3 with the specified size. 
+ /// + private static async Task VerifyObjectsInS3WithSize(string keyPrefix, int expectedCount, long expectedSize) + { + var listRequest = new ListObjectsV2Request + { + BucketName = bucketName, + Prefix = keyPrefix + "/" + }; + + var listResponse = await Client.ListObjectsV2Async(listRequest); + + var actualObjects = listResponse.S3Objects + .Where(s3o => !s3o.Key.EndsWith("/", StringComparison.Ordinal)) + .ToList(); + + Assert.AreEqual(expectedCount, actualObjects.Count); + + foreach (var s3Object in actualObjects) + { + Assert.AreEqual(expectedSize, s3Object.Size, + $"Object {s3Object.Key} should be {expectedSize} bytes"); + } + } + + /// + /// Verifies that a specific object exists in S3 with the expected size. + /// + private static async Task VerifyObjectExistsInS3(string key, long expectedSize) + { + var getRequest = new GetObjectMetadataRequest + { + BucketName = bucketName, + Key = key + }; + + var metadata = await Client.GetObjectMetadataAsync(getRequest); + + Assert.IsNotNull(metadata, $"Object should exist in S3: {key}"); + Assert.AreEqual(expectedSize, metadata.ContentLength, + $"Object {key} should be {expectedSize} bytes"); + } + + #endregion + } +} From 195860c8eb0ca6f5bd97864536d3e21152311163 Mon Sep 17 00:00:00 2001 From: Garrett Beatty Date: Wed, 3 Dec 2025 13:51:37 -0500 Subject: [PATCH 43/56] fix test (#4190) --- .../Custom/MultipartDownloadManagerTests.cs | 69 ++++++++++++++----- 1 file changed, 50 insertions(+), 19 deletions(-) diff --git a/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs b/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs index c2d1926bd823..7ea1c89af832 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs +++ b/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs @@ -859,9 +859,10 @@ public async Task StartDownloadsAsync_MultipartDownload_DoesNotCallWaitForCapaci } [TestMethod] - public async Task StartDownloadsAsync_BackgroundTask_PreAcquiresCapacityBeforeCreatingTasks() + public async Task StartDownloadsAsync_BackgroundTask_InterleavesCapacityAcquisitionWithTaskCreation() { - // Arrange - Test that background task pre-acquires all capacity before creating download tasks + // Arrange - Test that background task interleaves capacity acquisition with task creation + // This ensures sequential ordering: capacity(2) → task(2) → capacity(3) → task(3) var totalParts = 3; var partSize = 8 * 1024 * 1024; var totalObjectSize = totalParts * partSize; @@ -879,7 +880,8 @@ public async Task StartDownloadsAsync_BackgroundTask_PreAcquiresCapacityBeforeCr { lock (lockObject) { - var partNum = operationOrder.Count(o => o.operation == "capacity") + 2; // Parts 2, 3 + // Capacity is now acquired for Parts 1, 2, 3 (Part 1 during discovery) + var partNum = operationOrder.Count(o => o.operation == "capacity") + 1; operationOrder.Add(("capacity", partNum, operationCounter++)); } return Task.CompletedTask; @@ -919,27 +921,56 @@ public async Task StartDownloadsAsync_BackgroundTask_PreAcquiresCapacityBeforeCr var capacityOps = operationOrder.Where(o => o.operation == "capacity").ToList(); var taskOps = operationOrder.Where(o => o.operation == "task").ToList(); - Assert.AreEqual(3, capacityOps.Count, "Should acquire capacity discovery part 1 and for parts 2-3"); + Assert.AreEqual(3, capacityOps.Count, "Should acquire capacity for parts 1 (discovery), 2, 3 (background)"); Assert.AreEqual(3, taskOps.Count, "Should create tasks for parts 1-3"); - // Verify all capacity acquisitions 
happened before any task creation - // Find the highest sequence number among capacity operations - var lastCapacitySequence = capacityOps.Max(o => o.sequence); + // Verify Part 1: capacity → task (during discovery) + var part1Capacity = capacityOps.FirstOrDefault(o => o.partNum == 1); + var part1Task = taskOps.FirstOrDefault(o => o.partNum == 1); + Assert.IsNotNull(part1Capacity, "Part 1 capacity should be acquired during discovery"); + Assert.IsNotNull(part1Task, "Part 1 should be processed"); + Assert.IsTrue(part1Capacity.sequence < part1Task.sequence, + "Part 1 capacity should be acquired before Part 1 task"); - // Find the lowest sequence number among task operations - var firstTaskSequence = taskOps.Min(o => o.sequence); + // Verify interleaved pattern for background parts (2, 3) + // For each background part: capacity(N) → task(N) → capacity(N+1) → task(N+1) + for (int partNum = 2; partNum <= totalParts; partNum++) + { + var capacity = capacityOps.FirstOrDefault(o => o.partNum == partNum); + var task = taskOps.FirstOrDefault(o => o.partNum == partNum); + + Assert.IsNotNull(capacity, $"Part {partNum} capacity should be acquired"); + Assert.IsNotNull(task, $"Part {partNum} task should be created"); + + // Verify capacity comes before task for this part + Assert.IsTrue(capacity.sequence < task.sequence, + $"Part {partNum} capacity (seq={capacity.sequence}) should come before task (seq={task.sequence})"); + + // Verify interleaving: task(N) should come before capacity(N+1) + if (partNum < totalParts) + { + var nextCapacity = capacityOps.FirstOrDefault(o => o.partNum == partNum + 1); + Assert.IsNotNull(nextCapacity, $"Part {partNum + 1} capacity should exist"); + Assert.IsTrue(task.sequence < nextCapacity.sequence, + $"Part {partNum} task (seq={task.sequence}) should come before Part {partNum + 1} capacity (seq={nextCapacity.sequence})"); + } + } - // All capacity must be acquired (have lower sequence numbers) before tasks start - Assert.IsTrue(lastCapacitySequence < firstTaskSequence, - $"All capacity acquisitions must complete before task creation. " + - $"Last capacity sequence: {lastCapacitySequence}, First task sequence: {firstTaskSequence}. 
" + - $"Operations: {string.Join(", ", operationOrder.Select(o => $"{o.operation}({o.partNum})={o.sequence}"))}"); + // Verify overall sequential pattern: capacity(1) → task(1) → capacity(2) → task(2) → capacity(3) → task(3) + var expectedPattern = new[] + { + ("capacity", 1), ("task", 1), + ("capacity", 2), ("task", 2), + ("capacity", 3), ("task", 3) + }; - // Additional verification: Part 1 should be first task (processed during StartDownloadsAsync) - var part1Task = taskOps.FirstOrDefault(o => o.partNum == 1); - Assert.IsNotNull(part1Task, "Part 1 should be processed"); - Assert.IsTrue(part1Task.sequence < lastCapacitySequence, - "Part 1 should be processed before capacity acquisition for background parts"); + for (int i = 0; i < expectedPattern.Length; i++) + { + Assert.AreEqual(expectedPattern[i].Item1, operationOrder[i].operation, + $"Operation {i} should be {expectedPattern[i].Item1}"); + Assert.AreEqual(expectedPattern[i].Item2, operationOrder[i].partNum, + $"Operation {i} should be for part {expectedPattern[i].Item2}"); + } } } From c3da03245e4b6605da85940642453586c9076ac6 Mon Sep 17 00:00:00 2001 From: Garrett Beatty Date: Wed, 3 Dec 2025 13:58:34 -0500 Subject: [PATCH 44/56] fix integ test build (#4191) --- .../TransferUtilityUploadDirectoryLifecycleTests.cs | 1 - .../TransferUtilityUploadDirectoryWithResponseTests.cs | 1 - 2 files changed, 2 deletions(-) diff --git a/sdk/test/Services/S3/IntegrationTests/TransferUtilityUploadDirectoryLifecycleTests.cs b/sdk/test/Services/S3/IntegrationTests/TransferUtilityUploadDirectoryLifecycleTests.cs index 6e3c70d7eb0d..4acfe928ec7a 100644 --- a/sdk/test/Services/S3/IntegrationTests/TransferUtilityUploadDirectoryLifecycleTests.cs +++ b/sdk/test/Services/S3/IntegrationTests/TransferUtilityUploadDirectoryLifecycleTests.cs @@ -8,7 +8,6 @@ using Amazon.S3.Model; using Amazon.S3.Transfer; using Amazon.S3.Util; -using Amazon.S3.Transfer.Model; using AWSSDK_DotNet.IntegrationTests.Utils; namespace AWSSDK_DotNet.IntegrationTests.Tests.S3 diff --git a/sdk/test/Services/S3/IntegrationTests/TransferUtilityUploadDirectoryWithResponseTests.cs b/sdk/test/Services/S3/IntegrationTests/TransferUtilityUploadDirectoryWithResponseTests.cs index dee59bc11a5b..1f7c9f9d6c05 100644 --- a/sdk/test/Services/S3/IntegrationTests/TransferUtilityUploadDirectoryWithResponseTests.cs +++ b/sdk/test/Services/S3/IntegrationTests/TransferUtilityUploadDirectoryWithResponseTests.cs @@ -7,7 +7,6 @@ using Amazon.S3; using Amazon.S3.Model; using Amazon.S3.Transfer; -using Amazon.S3.Transfer.Model; using Amazon.S3.Util; using AWSSDK_DotNet.IntegrationTests.Utils; From 7f648134eee9c31bd6a4c1d6f35997d05aeb1c7c Mon Sep 17 00:00:00 2001 From: Garrett Beatty Date: Wed, 3 Dec 2025 14:59:08 -0500 Subject: [PATCH 45/56] fix build. 
(#4192) --- .../Custom/Transfer/Internal/BufferedPartDataHandler.cs | 8 +++----- .../S3/UnitTests/Custom/PartBufferManagerTests.cs | 2 +- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedPartDataHandler.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedPartDataHandler.cs index a52e5a159758..82d0a8f4d590 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedPartDataHandler.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedPartDataHandler.cs @@ -99,7 +99,7 @@ public async Task ProcessPartAsync( { if (partNumber == _partBufferManager.NextExpectedPartNumber) { - await ProcessStreamingPartAsync(partNumber, response, cancellationToken).ConfigureAwait(false); + ProcessStreamingPart(partNumber, response); } else { @@ -113,7 +113,6 @@ public async Task ProcessPartAsync( /// /// The part number being processed. /// The GetObjectResponse containing the part data. Ownership is transferred to StreamingDataSource. - /// Cancellation token for the operation. /// /// This method is called when the part arrives in the expected sequential order, allowing /// for optimal zero-copy streaming directly to the consumer without buffering into memory. @@ -129,10 +128,9 @@ public async Task ProcessPartAsync( /// - If constructor succeeds but AddBufferAsync fails: StreamingDataSource.Dispose() handles the response /// - If AddBufferAsync succeeds: Buffer manager owns everything and will clean up /// - private async Task ProcessStreamingPartAsync( + private void ProcessStreamingPart( int partNumber, - GetObjectResponse response, - CancellationToken cancellationToken) + GetObjectResponse response) { _logger.DebugFormat("BufferedPartDataHandler: [Part {0}] Matches NextExpectedPartNumber - streaming directly without buffering", partNumber); diff --git a/sdk/test/Services/S3/UnitTests/Custom/PartBufferManagerTests.cs b/sdk/test/Services/S3/UnitTests/Custom/PartBufferManagerTests.cs index 394490f1fbd2..121c50f0dede 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/PartBufferManagerTests.cs +++ b/sdk/test/Services/S3/UnitTests/Custom/PartBufferManagerTests.cs @@ -1006,7 +1006,7 @@ public async Task AddBufferAsync_IPartDataSource_WithBufferedDataSource_AddsSucc [TestMethod] [ExpectedException(typeof(ArgumentNullException))] - public async Task AddBufferAsync_IPartDataSource_WithNull_ThrowsArgumentNullException() + public void AddBufferAsync_IPartDataSource_WithNull_ThrowsArgumentNullException() { // Arrange var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); From 1012cfdad65f96266596b1e32f892a4f4ee8b891 Mon Sep 17 00:00:00 2001 From: Philippe El Asmar <53088140+philasmar@users.noreply.github.com> Date: Wed, 3 Dec 2025 16:58:35 -0500 Subject: [PATCH 46/56] remove InternalsVisibleTo for S3 integration tests (#4194) --- .../Generators/SourceFiles/AssemblyInfo.cs | 36 +++++-------------- .../Generators/SourceFiles/AssemblyInfo.tt | 4 --- .../Services/S3/Properties/AssemblyInfo.cs | 4 --- ...tegrationTestUtilities.NetFramework.csproj | 14 -------- ...DK.IntegrationTests.S3.NetFramework.csproj | 14 -------- .../IntegrationTests/TransferUtilityTests.cs | 15 ++------ 6 files changed, 11 insertions(+), 76 deletions(-) diff --git a/generator/ServiceClientGeneratorLib/Generators/SourceFiles/AssemblyInfo.cs b/generator/ServiceClientGeneratorLib/Generators/SourceFiles/AssemblyInfo.cs index 5f1a040525fd..09880b4803f6 100644 --- a/generator/ServiceClientGeneratorLib/Generators/SourceFiles/AssemblyInfo.cs +++ 
b/generator/ServiceClientGeneratorLib/Generators/SourceFiles/AssemblyInfo.cs @@ -92,32 +92,12 @@ public override string TransformText() #line default #line hidden - this.Write("[assembly: InternalsVisibleTo(\"AWSSDK.UnitTests.S3.NetFramework, PublicKey=002400" + - "0004800000940000000602000000240000525341310004000001000100db5f59f098d27276c78338" + - "75a6263a3cc74ab17ba9a9df0b52aedbe7252745db7274d5271fd79c1f08f668ecfa8eaab5626fa7" + - "6adc811d3c8fc55859b0d09d3bc0a84eecd0ba891f2b8a2fc55141cdcc37c2053d53491e650a4799" + - "67c3622762977900eddbf1252ed08a2413f00a28f3a0752a81203f03ccb7f684db373518b4\")]\r\n[" + - "assembly: InternalsVisibleTo(\"AWSSDK.UnitTests.NetFramework, PublicKey=002400000" + - "4800000940000000602000000240000525341310004000001000100db5f59f098d27276c7833875a" + - "6263a3cc74ab17ba9a9df0b52aedbe7252745db7274d5271fd79c1f08f668ecfa8eaab5626fa76ad" + - "c811d3c8fc55859b0d09d3bc0a84eecd0ba891f2b8a2fc55141cdcc37c2053d53491e650a479967c" + - "3622762977900eddbf1252ed08a2413f00a28f3a0752a81203f03ccb7f684db373518b4\")]\r\n\r\n//" + - " We should remove this in the future when TransferUtility Upload/Download direct" + - "ory methods return responses.\r\n// We should update the Integration Tests in Tran" + - "sferUtilityTests.cs to not use the internal methods and instead use the new publ" + - "ic ones that return responses.\r\n[assembly: InternalsVisibleTo(\"AWSSDK.Integratio" + - "nTests.S3.NetFramework, PublicKey=0024000004800000940000000602000000240000525341" + - "310004000001000100db5f59f098d27276c7833875a6263a3cc74ab17ba9a9df0b52aedbe7252745" + - "db7274d5271fd79c1f08f668ecfa8eaab5626fa76adc811d3c8fc55859b0d09d3bc0a84eecd0ba89" + - "1f2b8a2fc55141cdcc37c2053d53491e650a479967c3622762977900eddbf1252ed08a2413f00a28" + - "f3a0752a81203f03ccb7f684db373518b4\")]\r\n[assembly: InternalsVisibleTo(\"DynamicPro" + - "xyGenAssembly2, PublicKey=002400000480000094000000060200000024000052534131000400" + - "0001000100c547cac37abd99c8db225ef2f6c8a3602f3b3606cc9891605d02baa56104f4cfc0734a" + - "a39b93bf7852f7d9266654753cc297e7d2edfe0bac1cdcf9f717241550e0a7b191195b7667bb4f64" + - "bcb8e2121380fd1d9d46ad2d92d2d15605093924cceaf74c4861eff62abf69b9291ed0a340e113be" + - "11e6a7d3113e92484cf7045cc7\")]\r\n"); - - #line 37 "D:\CodeBase\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" + this.Write(@"[assembly: InternalsVisibleTo(""AWSSDK.UnitTests.S3.NetFramework, PublicKey=0024000004800000940000000602000000240000525341310004000001000100db5f59f098d27276c7833875a6263a3cc74ab17ba9a9df0b52aedbe7252745db7274d5271fd79c1f08f668ecfa8eaab5626fa76adc811d3c8fc55859b0d09d3bc0a84eecd0ba891f2b8a2fc55141cdcc37c2053d53491e650a479967c3622762977900eddbf1252ed08a2413f00a28f3a0752a81203f03ccb7f684db373518b4"")] +[assembly: InternalsVisibleTo(""AWSSDK.UnitTests.NetFramework, PublicKey=0024000004800000940000000602000000240000525341310004000001000100db5f59f098d27276c7833875a6263a3cc74ab17ba9a9df0b52aedbe7252745db7274d5271fd79c1f08f668ecfa8eaab5626fa76adc811d3c8fc55859b0d09d3bc0a84eecd0ba891f2b8a2fc55141cdcc37c2053d53491e650a479967c3622762977900eddbf1252ed08a2413f00a28f3a0752a81203f03ccb7f684db373518b4"")] +[assembly: InternalsVisibleTo(""DynamicProxyGenAssembly2, 
PublicKey=0024000004800000940000000602000000240000525341310004000001000100c547cac37abd99c8db225ef2f6c8a3602f3b3606cc9891605d02baa56104f4cfc0734aa39b93bf7852f7d9266654753cc297e7d2edfe0bac1cdcf9f717241550e0a7b191195b7667bb4f64bcb8e2121380fd1d9d46ad2d92d2d15605093924cceaf74c4861eff62abf69b9291ed0a340e113be11e6a7d3113e92484cf7045cc7"")] +"); + + #line 33 "D:\CodeBase\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" } #line default @@ -146,14 +126,14 @@ public override string TransformText() // [assembly: AssemblyVersion(""1.0.*"")] [assembly: AssemblyVersion("""); - #line 60 "D:\CodeBase\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" + #line 56 "D:\CodeBase\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" this.Write(this.ToStringHelper.ToStringWithCulture(this.Config.ServiceVersion)); #line default #line hidden this.Write("\")]\r\n[assembly: AssemblyFileVersion(\""); - #line 61 "D:\CodeBase\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" + #line 57 "D:\CodeBase\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" this.Write(this.ToStringHelper.ToStringWithCulture(this.Config.ServiceFileVersion)); #line default diff --git a/generator/ServiceClientGeneratorLib/Generators/SourceFiles/AssemblyInfo.tt b/generator/ServiceClientGeneratorLib/Generators/SourceFiles/AssemblyInfo.tt index fbed276512ca..31dceb950beb 100644 --- a/generator/ServiceClientGeneratorLib/Generators/SourceFiles/AssemblyInfo.tt +++ b/generator/ServiceClientGeneratorLib/Generators/SourceFiles/AssemblyInfo.tt @@ -29,10 +29,6 @@ using System.Runtime.CompilerServices; <# if (this.Config.AssemblyTitle=="AWSSDK.S3") { #> [assembly: InternalsVisibleTo("AWSSDK.UnitTests.S3.NetFramework, PublicKey=0024000004800000940000000602000000240000525341310004000001000100db5f59f098d27276c7833875a6263a3cc74ab17ba9a9df0b52aedbe7252745db7274d5271fd79c1f08f668ecfa8eaab5626fa76adc811d3c8fc55859b0d09d3bc0a84eecd0ba891f2b8a2fc55141cdcc37c2053d53491e650a479967c3622762977900eddbf1252ed08a2413f00a28f3a0752a81203f03ccb7f684db373518b4")] [assembly: InternalsVisibleTo("AWSSDK.UnitTests.NetFramework, PublicKey=0024000004800000940000000602000000240000525341310004000001000100db5f59f098d27276c7833875a6263a3cc74ab17ba9a9df0b52aedbe7252745db7274d5271fd79c1f08f668ecfa8eaab5626fa76adc811d3c8fc55859b0d09d3bc0a84eecd0ba891f2b8a2fc55141cdcc37c2053d53491e650a479967c3622762977900eddbf1252ed08a2413f00a28f3a0752a81203f03ccb7f684db373518b4")] - -// We should remove this in the future when TransferUtility Upload/Download directory methods return responses. -// We should update the Integration Tests in TransferUtilityTests.cs to not use the internal methods and instead use the new public ones that return responses. 
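For reference, the public replacement that the removed comment points to is exercised later in this same patch; a minimal sketch of the call (bucket and directory values are placeholders):

    var request = new TransferUtilityUploadDirectoryRequest
    {
        BucketName = "amzn-s3-demo-bucket",   // placeholder
        Directory = @"C:\data\to-upload",     // placeholder
        UploadFilesConcurrently = true
    };
    // Returns a response instead of requiring the internal UploadDirectoryCommand.
    var response = await transferUtility.UploadDirectoryWithResponseAsync(request);
    Console.WriteLine(response.ObjectsUploaded);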
-[assembly: InternalsVisibleTo("AWSSDK.IntegrationTests.S3.NetFramework, PublicKey=0024000004800000940000000602000000240000525341310004000001000100db5f59f098d27276c7833875a6263a3cc74ab17ba9a9df0b52aedbe7252745db7274d5271fd79c1f08f668ecfa8eaab5626fa76adc811d3c8fc55859b0d09d3bc0a84eecd0ba891f2b8a2fc55141cdcc37c2053d53491e650a479967c3622762977900eddbf1252ed08a2413f00a28f3a0752a81203f03ccb7f684db373518b4")] [assembly: InternalsVisibleTo("DynamicProxyGenAssembly2, PublicKey=0024000004800000940000000602000000240000525341310004000001000100c547cac37abd99c8db225ef2f6c8a3602f3b3606cc9891605d02baa56104f4cfc0734aa39b93bf7852f7d9266654753cc297e7d2edfe0bac1cdcf9f717241550e0a7b191195b7667bb4f64bcb8e2121380fd1d9d46ad2d92d2d15605093924cceaf74c4861eff62abf69b9291ed0a340e113be11e6a7d3113e92484cf7045cc7")] <# } #> [assembly: AssemblyConfiguration("")] diff --git a/sdk/src/Services/S3/Properties/AssemblyInfo.cs b/sdk/src/Services/S3/Properties/AssemblyInfo.cs index 9c65f527a8c4..980e732be31d 100644 --- a/sdk/src/Services/S3/Properties/AssemblyInfo.cs +++ b/sdk/src/Services/S3/Properties/AssemblyInfo.cs @@ -21,10 +21,6 @@ [assembly: InternalsVisibleTo("AWSSDK.UnitTests.S3.NetFramework, PublicKey=0024000004800000940000000602000000240000525341310004000001000100db5f59f098d27276c7833875a6263a3cc74ab17ba9a9df0b52aedbe7252745db7274d5271fd79c1f08f668ecfa8eaab5626fa76adc811d3c8fc55859b0d09d3bc0a84eecd0ba891f2b8a2fc55141cdcc37c2053d53491e650a479967c3622762977900eddbf1252ed08a2413f00a28f3a0752a81203f03ccb7f684db373518b4")] [assembly: InternalsVisibleTo("AWSSDK.UnitTests.NetFramework, PublicKey=0024000004800000940000000602000000240000525341310004000001000100db5f59f098d27276c7833875a6263a3cc74ab17ba9a9df0b52aedbe7252745db7274d5271fd79c1f08f668ecfa8eaab5626fa76adc811d3c8fc55859b0d09d3bc0a84eecd0ba891f2b8a2fc55141cdcc37c2053d53491e650a479967c3622762977900eddbf1252ed08a2413f00a28f3a0752a81203f03ccb7f684db373518b4")] - -// We should remove this in the future when TransferUtility Upload/Download directory methods return responses. -// We should update the Integration Tests in TransferUtilityTests.cs to not use the internal methods and instead use the new public ones that return responses. 
-[assembly: InternalsVisibleTo("AWSSDK.IntegrationTests.S3.NetFramework, PublicKey=0024000004800000940000000602000000240000525341310004000001000100db5f59f098d27276c7833875a6263a3cc74ab17ba9a9df0b52aedbe7252745db7274d5271fd79c1f08f668ecfa8eaab5626fa76adc811d3c8fc55859b0d09d3bc0a84eecd0ba891f2b8a2fc55141cdcc37c2053d53491e650a479967c3622762977900eddbf1252ed08a2413f00a28f3a0752a81203f03ccb7f684db373518b4")] [assembly: InternalsVisibleTo("DynamicProxyGenAssembly2, PublicKey=0024000004800000940000000602000000240000525341310004000001000100c547cac37abd99c8db225ef2f6c8a3602f3b3606cc9891605d02baa56104f4cfc0734aa39b93bf7852f7d9266654753cc297e7d2edfe0bac1cdcf9f717241550e0a7b191195b7667bb4f64bcb8e2121380fd1d9d46ad2d92d2d15605093924cceaf74c4861eff62abf69b9291ed0a340e113be11e6a7d3113e92484cf7045cc7")] [assembly: AssemblyConfiguration("")] [assembly: AssemblyProduct("Amazon Web Services SDK for .NET")] diff --git a/sdk/test/IntegrationTests/AWSSDK.IntegrationTestUtilities.NetFramework.csproj b/sdk/test/IntegrationTests/AWSSDK.IntegrationTestUtilities.NetFramework.csproj index 53471f142b89..5fb4162b7a2d 100644 --- a/sdk/test/IntegrationTests/AWSSDK.IntegrationTestUtilities.NetFramework.csproj +++ b/sdk/test/IntegrationTests/AWSSDK.IntegrationTestUtilities.NetFramework.csproj @@ -16,23 +16,9 @@ false false true - true CS1591,CS0612,CS0618 true - - - - - ../../awssdk.dll.snk - - - - - $(AWSKeyFile) - - - diff --git a/sdk/test/Services/S3/IntegrationTests/AWSSDK.IntegrationTests.S3.NetFramework.csproj b/sdk/test/Services/S3/IntegrationTests/AWSSDK.IntegrationTests.S3.NetFramework.csproj index 832f59d0fece..fcf55937873e 100644 --- a/sdk/test/Services/S3/IntegrationTests/AWSSDK.IntegrationTests.S3.NetFramework.csproj +++ b/sdk/test/Services/S3/IntegrationTests/AWSSDK.IntegrationTests.S3.NetFramework.csproj @@ -16,23 +16,9 @@ false false true - true true CS1591,CS0612,CS0618 - - - - - ../../../../awssdk.dll.snk - - - - - $(AWSKeyFile) - - - diff --git a/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs b/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs index e3f1fa4a2272..bdc72ecdcea4 100644 --- a/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs +++ b/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs @@ -2594,10 +2594,7 @@ public async Task UploadDirectoryFailurePolicy_ContinueOnFailure_AllFailures() }; // ContinueOnFailure should not throw even if all uploads fail - var config = new TransferUtilityConfig(); - var command = new Amazon.S3.Transfer.Internal.UploadDirectoryCommand(transferUtility, config, request); - command.UploadFilesConcurrently = request.UploadFilesConcurrently; - var response = await command.ExecuteAsync(CancellationToken.None).ConfigureAwait(false); + var response = await transferUtility.UploadDirectoryWithResponseAsync(request); Assert.IsNotNull(response); Assert.AreEqual(0, response.ObjectsUploaded); @@ -2631,10 +2628,7 @@ public async Task UploadDirectoryFailurePolicy_ContinueOnFailure_AllSuccess() UploadFilesConcurrently = true }; - var config = new TransferUtilityConfig(); - var command = new Amazon.S3.Transfer.Internal.UploadDirectoryCommand(transferUtility, config, request); - command.UploadFilesConcurrently = request.UploadFilesConcurrently; - var response = await command.ExecuteAsync(CancellationToken.None).ConfigureAwait(false); + var response = await transferUtility.UploadDirectoryWithResponseAsync(request); Assert.IsNotNull(response); Assert.AreEqual(3, response.ObjectsUploaded); @@ -2672,10 +2666,7 @@ public async Task 
UploadDirectoryFailurePolicy_AbortOnFailure_Throws() UploadFilesConcurrently = true }; - var config = new TransferUtilityConfig(); - var command = new Amazon.S3.Transfer.Internal.UploadDirectoryCommand(transferUtility, config, request); - command.UploadFilesConcurrently = request.UploadFilesConcurrently; - await Assert.ThrowsExceptionAsync(() => command.ExecuteAsync(CancellationToken.None)); + await Assert.ThrowsExceptionAsync(() => transferUtility.UploadDirectoryWithResponseAsync(request)); } } finally From 0a857fa81f3de3c845f1698e0336714dc73899a4 Mon Sep 17 00:00:00 2001 From: Garrett Beatty Date: Thu, 4 Dec 2025 13:08:27 -0500 Subject: [PATCH 47/56] Fix DownloadPartProgressEventCallback race condition (#4196) --- .../Internal/MultipartDownloadManager.cs | 26 ++++++++++++++----- 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs index 0208ae789823..59d14e889b28 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs @@ -62,9 +62,9 @@ internal class MultipartDownloadManager : IDownloadManager // Atomic flag to ensure completion event fires exactly once // Without this, concurrent parts completing simultaneously can both see - // transferredBytes >= _totalObjectSize and fire duplicate completion events - // Uses int instead of bool because Interlocked.CompareExchange requires reference types - private int _completionEventFired = 0; // 0 = false, 1 = true + // transferredBytes == _totalObjectSize and fire duplicate completion events + // Uses long instead of bool for compatibility with Interlocked operations + private long _completionEventFired = 0; // 0 = false, 1 = true private readonly Logger _logger = Logger.GetLogger(typeof(MultipartDownloadManager)); @@ -733,26 +733,40 @@ private WriteObjectProgressArgs CreateProgressArgs(long incrementTransferred, lo /// Progress aggregation callback that combines progress across all concurrent part downloads. /// Uses thread-safe counter increment to handle concurrent updates. /// Detects completion naturally when transferred bytes reaches total size. - /// Uses atomic flag to ensure completion event fires exactly once. + /// Uses atomic flag to ensure completion event fires exactly once and prevents any events after completion. 
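The completion gate added in this hunk reduces to the following pattern (a self-contained sketch; TryClaimCompletion and CompletionAlreadyFired are illustrative helper names, not part of the patch):

    private long _completionEventFired = 0; // 0 = not fired, 1 = fired

    private bool CompletionAlreadyFired()
    {
        // Interlocked.Read is only defined for 64-bit values, which is why the
        // flag is a long rather than an int or bool.
        return Interlocked.Read(ref _completionEventFired) == 1;
    }

    private bool TryClaimCompletion()
    {
        // CompareExchange returns the value it observed before the swap, so
        // exactly one caller sees 0 and wins the right to fire the event.
        return Interlocked.CompareExchange(ref _completionEventFired, 1, 0) == 0;
    }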
/// private void DownloadPartProgressEventCallback(object sender, WriteObjectProgressArgs e) { long transferredBytes = Interlocked.Add(ref _totalTransferredBytes, e.IncrementTransferred); + // Check if completion was already fired - if so, skip this event entirely + // This prevents the race condition where per-part completion events arrive after + // the aggregated completion event has already been fired + if (Interlocked.Read(ref _completionEventFired) == 1) + { + return; // Already completed, don't fire any more events + } + // Use atomic CompareExchange to ensure only first thread fires completion bool isComplete = false; - if (transferredBytes >= _totalObjectSize) + if (transferredBytes == _totalObjectSize) { // CompareExchange returns the original value before the exchange // If original value was 0 (false), we're the first thread and should fire completion - int originalValue = Interlocked.CompareExchange(ref _completionEventFired, 1, 0); + long originalValue = Interlocked.CompareExchange(ref _completionEventFired, 1, 0); if (originalValue == 0) // Was false, now set to true { isComplete = true; } + else + { + // Another thread already fired completion, skip this event + return; + } } // Create and fire aggregated progress event + // Only reached if completion hasn't been fired yet var aggregatedArgs = CreateProgressArgs(e.IncrementTransferred, transferredBytes, isComplete); _userProgressCallback?.Invoke(this, aggregatedArgs); } From f2e9a53ea0cc44ac59c1a5eedece04d7d4bca6c1 Mon Sep 17 00:00:00 2001 From: Garrett Beatty Date: Thu, 11 Dec 2025 09:24:23 -0500 Subject: [PATCH 48/56] fix http concurrency (#4218) --- .../Transfer/Internal/FilePartDataHandler.cs | 7 +- .../Internal/MultipartDownloadManager.cs | 356 ++++++---- .../Custom/MultipartDownloadManagerTests.cs | 628 +++++++++++++++--- 3 files changed, 787 insertions(+), 204 deletions(-) diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/FilePartDataHandler.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/FilePartDataHandler.cs index da9210465fde..4d7415a4a8f5 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/FilePartDataHandler.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/FilePartDataHandler.cs @@ -90,7 +90,7 @@ public async Task ProcessPartAsync( partNumber, offset); // Write part data to file at the calculated offset - await WritePartToFileAsync(offset, response, cancellationToken) + await WritePartToFileAsync(partNumber, offset, response, cancellationToken) .ConfigureAwait(false); _logger.DebugFormat("FilePartDataHandler: [Part {0}] File write completed successfully", @@ -192,6 +192,7 @@ private long GetPartOffset(GetObjectResponse response, int partNumber) /// Writes part data from GetObjectResponse ResponseStream to the file at the specified offset. 
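The write path described above comes down to a positioned write into a pre-created file (a simplified sketch, assuming an async context with path, offset, response, and cancellationToken in scope; the patch itself routes the copy through the response's own stream-writing logic rather than a raw CopyToAsync):

    using (var fileStream = new FileStream(path, FileMode.Open, FileAccess.Write, FileShare.Write))
    {
        // Each part is written at its precomputed offset (for ranged GETs,
        // offset = (partNumber - 1) * partSize), so parts may finish in any order.
        fileStream.Seek(offset, SeekOrigin.Begin);
        await response.ResponseStream.CopyToAsync(fileStream, 81920, cancellationToken);
        await fileStream.FlushAsync(cancellationToken);
    }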
         /// </summary>
         private async Task WritePartToFileAsync(
+            int partNumber,
             long offset,
             GetObjectResponse response,
             CancellationToken cancellationToken)
@@ -213,7 +214,7 @@ private async Task WritePartToFileAsync(
                 // Seek to the correct offset for this part
                 fileStream.Seek(offset, SeekOrigin.Begin);
 
-                _logger.DebugFormat("FilePartDataHandler: Writing {0} bytes to file at offset {1}",
+                _logger.DebugFormat("FilePartDataHandler: [Part {0}] Writing {1} bytes to file at offset {2}", partNumber,
                     response.ContentLength, offset);
 
                 // Use GetObjectResponse's stream copy logic which includes:
@@ -232,7 +233,7 @@ await response.WriteResponseStreamAsync(
                 await fileStream.FlushAsync(cancellationToken)
                     .ConfigureAwait(false);
 
-                _logger.DebugFormat("FilePartDataHandler: Successfully wrote {0} bytes at offset {1}",
+                _logger.DebugFormat("FilePartDataHandler: [Part {0}] Successfully wrote {1} bytes at offset {2}", partNumber,
                     response.ContentLength, offset);
             }
         }

diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs
index 59d14e889b28..010243c8c7bd 100644
--- a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs
+++ b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs
@@ -173,6 +173,41 @@ public Exception DownloadException
             }
         }
 
+        /// <summary>
+        /// Discovers the download strategy (single-part vs multipart) by making an initial GetObject request.
+        /// </summary>
+        /// <param name="cancellationToken">Cancellation token to cancel the discovery operation.</param>
+        /// <returns>
+        /// A <see cref="DownloadDiscoveryResult"/> containing information about the object size, part count,
+        /// and the initial GetObject response.
+        /// </returns>
+        /// <remarks>
+        /// IMPORTANT - HTTP Semaphore Lifecycle:
+        /// This method acquires an HTTP concurrency slot from the configured semaphore and downloads Part 1.
+        /// The semaphore slot is HELD until <see cref="StartDownloadsAsync"/> completes processing Part 1.
+        /// Callers MUST call <see cref="StartDownloadsAsync"/> after this method to release the semaphore.
+        /// Failure to call <see cref="StartDownloadsAsync"/> will cause the semaphore slot to remain held indefinitely,
+        /// potentially blocking other downloads and causing deadlocks.
+        ///
+        /// Concurrency Implications:
+        /// With limited HTTP concurrency (e.g., ConcurrentServiceRequests=1 for shared throttlers in directory downloads),
+        /// concurrent calls to this method will block until previous downloads complete their full lifecycle
+        /// (discover → start). This is by design to ensure the entire I/O operation (network + disk) is
+        /// within the concurrency limit. For single-slot throttlers, downloads must be processed sequentially:
+        /// complete one download's full lifecycle before starting the next.
+        ///
+        /// Typical Usage Pattern:
+        /// <code>
+        /// var discovery = await manager.DiscoverDownloadStrategyAsync(cancellationToken);
+        /// await manager.StartDownloadsAsync(discovery, progressCallback, cancellationToken);
+        /// await manager.DownloadCompletionTask; // Wait for multipart downloads to finish
+        /// </code>
+        /// </remarks>
+        /// <exception cref="ObjectDisposedException">Thrown if the manager has been disposed.</exception>
+        /// <exception cref="InvalidOperationException">Thrown if discovery has already been performed.</exception>
+        /// <exception cref="OperationCanceledException">Thrown if the operation is cancelled.</exception>
         /// <inheritdoc/>
         public async Task<DownloadDiscoveryResult> DiscoverDownloadStrategyAsync(CancellationToken cancellationToken)
         {
@@ -209,6 +244,50 @@ public async Task<DownloadDiscoveryResult> DiscoverDownloadStrategyAsync(Cancell
             }
         }
 
+        /// <summary>
+        /// Processes Part 1 and starts downloading remaining parts for multipart downloads.
+        /// Returns immediately after processing Part 1 to allow the consumer to begin reading.
+        /// </summary>
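Spelled out as a caller would write it, the documented lifecycle looks like this (a sketch; manager and progressCallback are assumed to already exist, and error handling is elided):

    // 1. Discovery downloads Part 1 and keeps one HTTP concurrency slot held.
    var discovery = await manager.DiscoverDownloadStrategyAsync(cancellationToken);

    // 2. StartDownloadsAsync processes Part 1, releases that slot, and starts Parts 2+.
    await manager.StartDownloadsAsync(discovery, progressCallback, cancellationToken);

    // 3. Background part downloads are awaited separately.
    await manager.DownloadCompletionTask;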
+        /// <param name="discoveryResult">
+        /// The discovery result from <see cref="DiscoverDownloadStrategyAsync"/> containing object metadata
+        /// and the initial GetObject response.
+        /// </param>
+        /// <param name="progressCallback">
+        /// Optional progress callback that will be invoked as parts are downloaded. For multipart downloads,
+        /// progress is aggregated across all concurrent part downloads.
+        /// </param>
+        /// <param name="cancellationToken">Cancellation token to cancel the download operation.</param>
+        /// <returns>
+        /// A task that completes after Part 1 is processed. For multipart downloads, remaining parts
+        /// continue downloading in the background (monitor via <see cref="DownloadCompletionTask"/>).
+        /// </returns>
+        /// <remarks>
+        /// HTTP Semaphore Release:
+        /// This method processes Part 1 (downloaded during <see cref="DiscoverDownloadStrategyAsync"/>)
+        /// and releases the HTTP semaphore slot that was acquired during discovery.
+        /// The semaphore is released after both the network download and disk write
+        /// operations complete for Part 1. This ensures the ConcurrentServiceRequests limit
+        /// controls the entire I/O operation (network + disk), not just the network download.
+        ///
+        /// Background Processing (Multipart Only):
+        /// For multipart downloads (when TotalParts > 1), this method starts a background task
+        /// to download and process remaining parts (Part 2+) and returns immediately. This allows the
+        /// consumer to start reading from the buffer without waiting for all downloads to complete,
+        /// which prevents deadlocks when the buffer fills up before the consumer begins reading.
+        /// Monitor <see cref="DownloadCompletionTask"/> to detect when all background downloads have finished.
+        ///
+        /// Single-Part Downloads:
+        /// For single-part downloads (when TotalParts = 1), this method processes Part 1 synchronously
+        /// and returns immediately. No background task is created, and <see cref="DownloadCompletionTask"/>
+        /// will already be completed when this method returns.
+        /// </remarks>
+        /// <exception cref="ObjectDisposedException">Thrown if the manager has been disposed.</exception>
+        /// <exception cref="ArgumentNullException">Thrown if <paramref name="discoveryResult"/> is null.</exception>
+        /// <exception cref="OperationCanceledException">Thrown if the operation is cancelled.</exception>
         /// <inheritdoc/>
         public async Task StartDownloadsAsync(DownloadDiscoveryResult discoveryResult, EventHandler<WriteObjectProgressArgs> progressCallback, CancellationToken cancellationToken)
         {
@@ -229,9 +308,6 @@ public async Task StartDownloadsAsync(DownloadDiscoveryResult discoveryResult, E
 
             try
             {
-                // Prepare the data handler (e.g., create temp files for file-based downloads)
-                await _dataHandler.PrepareAsync(discoveryResult, cancellationToken).ConfigureAwait(false);
-
                 // Create delegate once and reuse for all parts
                 var wrappedCallback = progressCallback != null ?
new EventHandler(DownloadPartProgressEventCallback) @@ -239,6 +315,9 @@ public async Task StartDownloadsAsync(DownloadDiscoveryResult discoveryResult, E try { + // Prepare the data handler (e.g., create temp files for file-based downloads) + await _dataHandler.PrepareAsync(discoveryResult, cancellationToken).ConfigureAwait(false); + // Attach progress callback to Part 1's response if provided if (wrappedCallback != null) { @@ -246,17 +325,26 @@ public async Task StartDownloadsAsync(DownloadDiscoveryResult discoveryResult, E } // Process Part 1 from InitialResponse (applies to both single-part and multipart) - _logger.DebugFormat("MultipartDownloadManager: Buffering Part 1 from discovery response"); + // NOTE: Semaphore is still held from discovery phase and will be released in finally block + _logger.DebugFormat("MultipartDownloadManager: Processing Part 1 from discovery response"); await _dataHandler.ProcessPartAsync(1, discoveryResult.InitialResponse, cancellationToken).ConfigureAwait(false); + + _logger.DebugFormat("MultipartDownloadManager: Part 1 processing completed"); } finally { // Always detach the event handler to prevent memory leak - // This runs whether ProcessPartAsync succeeds or throws if (wrappedCallback != null) { discoveryResult.InitialResponse.WriteObjectProgressEvent -= wrappedCallback; } + + // Release semaphore after BOTH network download AND disk write complete for Part 1 + // This ensures ConcurrentServiceRequests controls the entire I/O operation, + // consistent with Parts 2+ (see CreateDownloadTaskAsync) + _httpConcurrencySlots.Release(); + _logger.DebugFormat("MultipartDownloadManager: [Part 1] HTTP concurrency slot released (Available: {0}/{1})", + _httpConcurrencySlots.CurrentCount, _config.ConcurrentServiceRequests); } if (discoveryResult.IsSinglePart) @@ -374,7 +462,9 @@ private async Task CreateDownloadTaskAsync(int partNumber, long objectSize, Even _logger.DebugFormat("MultipartDownloadManager: [Part {0}] Waiting for HTTP concurrency slot (Available: {1}/{2})", partNumber, _httpConcurrencySlots.CurrentCount, _config.ConcurrentServiceRequests); - // Limit HTTP concurrency + // Limit HTTP concurrency for both network download AND disk write + // The semaphore is held until AFTER ProcessPartAsync completes to ensure + // ConcurrentServiceRequests controls the entire I/O operation await _httpConcurrencySlots.WaitAsync(cancellationToken).ConfigureAwait(false); _logger.DebugFormat("MultipartDownloadManager: [Part {0}] HTTP concurrency slot acquired", partNumber); @@ -438,25 +528,27 @@ private async Task CreateDownloadTaskAsync(int partNumber, long objectSize, Even } _logger.DebugFormat("MultipartDownloadManager: [Part {0}] ETag validation passed", partNumber); + + _logger.DebugFormat("MultipartDownloadManager: [Part {0}] Processing part (handler will decide: stream or buffer)", partNumber); + + // Delegate data handling to the handler + // IMPORTANT: Handler takes ownership of response and is responsible for disposing it in ALL cases: + // - If streaming: StreamingDataSource takes ownership and disposes when consumer finishes reading + // - If buffering: Handler disposes immediately after copying data to buffer + // - On error: Handler disposes in its catch block before rethrowing + await _dataHandler.ProcessPartAsync(partNumber, response, cancellationToken).ConfigureAwait(false); + ownsResponse = false; // Ownership transferred to handler + + _logger.DebugFormat("MultipartDownloadManager: [Part {0}] Processing completed successfully", partNumber); } finally { 
+ // Release semaphore after BOTH network download AND disk write complete + // This ensures ConcurrentServiceRequests limits the entire I/O operation _httpConcurrencySlots.Release(); _logger.DebugFormat("MultipartDownloadManager: [Part {0}] HTTP concurrency slot released (Available: {1}/{2})", partNumber, _httpConcurrencySlots.CurrentCount, _config.ConcurrentServiceRequests); } - - _logger.DebugFormat("MultipartDownloadManager: [Part {0}] Processing part (handler will decide: stream or buffer)", partNumber); - - // Delegate data handling to the handler - // IMPORTANT: Handler takes ownership of response and is responsible for disposing it in ALL cases: - // - If streaming: StreamingDataSource takes ownership and disposes when consumer finishes reading - // - If buffering: Handler disposes immediately after copying data to buffer - // - On error: Handler disposes in its catch block before rethrowing - await _dataHandler.ProcessPartAsync(partNumber, response, cancellationToken).ConfigureAwait(false); - ownsResponse = false; // Ownership transferred to handler - - _logger.DebugFormat("MultipartDownloadManager: [Part {0}] Processing completed successfully", partNumber); } catch (Exception ex) { @@ -491,59 +583,66 @@ private async Task DiscoverUsingPartStrategyAsync(Cance await _httpConcurrencySlots.WaitAsync(cancellationToken).ConfigureAwait(false); GetObjectResponse firstPartResponse = null; + + // NOTE: Semaphore is NOT released here - it will be released in StartDownloadsAsync + // after Part 1 is processed. This ensures the semaphore controls both network download + // AND disk write for Part 1, consistent with Parts 2+ (see CreateDownloadTaskAsync) + try { // SEP Part GET Step 2: "send the request and wait for the response in a non-blocking fashion" firstPartResponse = await _s3Client.GetObjectAsync(firstPartRequest, cancellationToken).ConfigureAwait(false); - } - finally - { - _httpConcurrencySlots.Release(); - _logger.DebugFormat("MultipartDownloadManager: [Part 1 Discovery] HTTP concurrency slot released"); - } - - if (firstPartResponse == null) - throw new InvalidOperationException("Failed to retrieve object from S3"); - - // SEP Part GET Step 3: Save ETag for later IfMatch validation in subsequent requests - _savedETag = firstPartResponse.ETag; - - // SEP Part GET Step 3: "check the response. First parse total content length from ContentRange - // of the GetObject response and save the value in a variable. The length is the numeric value - // after / delimiter. For example, given ContentRange=bytes 0-1/5, 5 is the total content length. - // Then check PartsCount." - if (firstPartResponse.PartsCount.HasValue && firstPartResponse.PartsCount.Value > 1) - { - // SEP Part GET Step 3: "If PartsCount in the response is larger than 1, it indicates there - // are more parts available to download. The S3 Transfer Manager MUST save etag from the - // response to a variable." 
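The ContentRange parsing referenced in this hunk is plain delimiter splitting (a minimal sketch of what ExtractTotalSizeFromContentRange must do; its body is not shown in this diff, and this assumes S3 always reports a numeric total):

    // "bytes 0-5242879/52428800" -> 52428800 (the total is the value after '/')
    private static long ExtractTotalSizeFromContentRange(string contentRange)
    {
        var slashIndex = contentRange.LastIndexOf('/');
        return long.Parse(contentRange.Substring(slashIndex + 1));
    }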
- _discoveredPartCount = firstPartResponse.PartsCount.Value; - // Parse total content length from ContentRange header - // For example, "bytes 0-5242879/52428800" -> extract 52428800 - var totalObjectSize = ExtractTotalSizeFromContentRange(firstPartResponse.ContentRange); + if (firstPartResponse == null) + throw new InvalidOperationException("Failed to retrieve object from S3"); - // SEP Part GET Step 7 will use this response for creating DownloadResponse - // Keep the response with its stream (will be buffered in StartDownloadsAsync) - return new DownloadDiscoveryResult + // SEP Part GET Step 3: Save ETag for later IfMatch validation in subsequent requests + _savedETag = firstPartResponse.ETag; + + // SEP Part GET Step 3: "check the response. First parse total content length from ContentRange + // of the GetObject response and save the value in a variable. The length is the numeric value + // after / delimiter. For example, given ContentRange=bytes 0-1/5, 5 is the total content length. + // Then check PartsCount." + if (firstPartResponse.PartsCount.HasValue && firstPartResponse.PartsCount.Value > 1) { - TotalParts = firstPartResponse.PartsCount.Value, - ObjectSize = totalObjectSize, - InitialResponse = firstPartResponse // Keep response with stream - }; + // SEP Part GET Step 3: "If PartsCount in the response is larger than 1, it indicates there + // are more parts available to download. The S3 Transfer Manager MUST save etag from the + // response to a variable." + _discoveredPartCount = firstPartResponse.PartsCount.Value; + + // Parse total content length from ContentRange header + // For example, "bytes 0-5242879/52428800" -> extract 52428800 + var totalObjectSize = ExtractTotalSizeFromContentRange(firstPartResponse.ContentRange); + + // SEP Part GET Step 7 will use this response for creating DownloadResponse + // Keep the response with its stream (will be buffered in StartDownloadsAsync) + return new DownloadDiscoveryResult + { + TotalParts = firstPartResponse.PartsCount.Value, + ObjectSize = totalObjectSize, + InitialResponse = firstPartResponse // Keep response with stream + }; + } + else + { + // SEP Part GET Step 3: "If PartsCount is 1, go to Step 7." + _discoveredPartCount = 1; + + // Single part upload - return the response for immediate use (SEP Step 7) + return new DownloadDiscoveryResult + { + TotalParts = 1, + ObjectSize = firstPartResponse.ContentLength, + InitialResponse = firstPartResponse // Keep response with stream + }; + } } - else + catch { - // SEP Part GET Step 3: "If PartsCount is 1, go to Step 7." - _discoveredPartCount = 1; - - // Single part upload - return the response for immediate use (SEP Step 7) - return new DownloadDiscoveryResult - { - TotalParts = 1, - ObjectSize = firstPartResponse.ContentLength, - InitialResponse = firstPartResponse // Keep response with stream - }; + // On error, release semaphore and dispose response before rethrowing + _httpConcurrencySlots.Release(); + firstPartResponse?.Dispose(); + throw; } } @@ -568,84 +667,91 @@ private async Task DiscoverUsingRangeStrategyAsync(Canc await _httpConcurrencySlots.WaitAsync(cancellationToken).ConfigureAwait(false); GetObjectResponse firstRangeResponse = null; + + // NOTE: Semaphore is NOT released here - it will be released in StartDownloadsAsync + // after Part 1 is processed. 
This ensures the semaphore controls both network download + // AND disk write for Part 1, consistent with Parts 2+ (see CreateDownloadTaskAsync) + try { // SEP Ranged GET Step 2: "send the request and wait for the response in a non-blocking fashion" firstRangeResponse = await _s3Client.GetObjectAsync(firstRangeRequest, cancellationToken).ConfigureAwait(false); - } - finally - { - _httpConcurrencySlots.Release(); - _logger.DebugFormat("MultipartDownloadManager: [Part 1 Discovery] HTTP concurrency slot released"); - } - - // Defensive null check - if (firstRangeResponse == null) - throw new InvalidOperationException("Failed to retrieve object from S3"); - - // SEP Ranged GET Step 5: "save Etag from the response to a variable" - // (for IfMatch validation in subsequent requests) - _savedETag = firstRangeResponse.ETag; - - // SEP Ranged GET Step 3: "parse total content length from ContentRange of the GetObject response - // and save the value in a variable. The length is the numeric value after / delimiter. - // For example, given ContentRange=bytes0-1/5, 5 is the total content length." - // Check if ContentRange is null (object smaller than requested range) - if (firstRangeResponse.ContentRange == null) - { - // No ContentRange means we got the entire small object - _discoveredPartCount = 1; - return new DownloadDiscoveryResult + // Defensive null check + if (firstRangeResponse == null) + throw new InvalidOperationException("Failed to retrieve object from S3"); + + // SEP Ranged GET Step 5: "save Etag from the response to a variable" + // (for IfMatch validation in subsequent requests) + _savedETag = firstRangeResponse.ETag; + + // SEP Ranged GET Step 3: "parse total content length from ContentRange of the GetObject response + // and save the value in a variable. The length is the numeric value after / delimiter. + // For example, given ContentRange=bytes0-1/5, 5 is the total content length." + // Check if ContentRange is null (object smaller than requested range) + if (firstRangeResponse.ContentRange == null) { - TotalParts = 1, - ObjectSize = firstRangeResponse.ContentLength, - InitialResponse = firstRangeResponse // Keep response with stream - }; - } - - - // Parse total object size from ContentRange (e.g., "bytes 0-5242879/52428800" -> 52428800) - var totalContentLength = ExtractTotalSizeFromContentRange(firstRangeResponse.ContentRange); - - // SEP Ranged GET Step 4: "compare the parsed total content length from Step 3 with ContentLength - // of the response. If the parsed total content length equals to the value from ContentLength, - // it indicates this request contains all of the data. The request is finished, return the response." - if (totalContentLength == firstRangeResponse.ContentLength) - { - // Single part: total size equals returned ContentLength - // This request contains all of the data - _discoveredPartCount = 1; + // No ContentRange means we got the entire small object + _discoveredPartCount = 1; + + return new DownloadDiscoveryResult + { + TotalParts = 1, + ObjectSize = firstRangeResponse.ContentLength, + InitialResponse = firstRangeResponse // Keep response with stream + }; + } + + + // Parse total object size from ContentRange (e.g., "bytes 0-5242879/52428800" -> 52428800) + var totalContentLength = ExtractTotalSizeFromContentRange(firstRangeResponse.ContentRange); + + // SEP Ranged GET Step 4: "compare the parsed total content length from Step 3 with ContentLength + // of the response. 
If the parsed total content length equals to the value from ContentLength, + // it indicates this request contains all of the data. The request is finished, return the response." + if (totalContentLength == firstRangeResponse.ContentLength) + { + // Single part: total size equals returned ContentLength + // This request contains all of the data + _discoveredPartCount = 1; + + return new DownloadDiscoveryResult + { + TotalParts = 1, + ObjectSize = totalContentLength, + InitialResponse = firstRangeResponse // Keep response with stream + }; + } + // SEP Ranged GET Step 4: "If they do not match, it indicates there are more parts available + // to download. Add a validation to verify that ContentLength equals to the targetPartSizeBytes." + if (firstRangeResponse.ContentLength != targetPartSize) + { + throw new InvalidOperationException( + $"Expected first part size {targetPartSize} bytes, but received {firstRangeResponse.ContentLength} bytes. " + + $"Total object size is {totalContentLength} bytes."); + } + + // SEP Ranged GET Step 5: "calculate number of requests required by performing integer division + // of total contentLength/targetPartSizeBytes. Save the number of ranged GET requests in a variable." + _discoveredPartCount = (int)Math.Ceiling((double)totalContentLength / targetPartSize); + + // SEP Ranged GET Step 9 will use this response for creating DownloadResponse + // Keep the response with its stream (will be buffered in StartDownloadsAsync) return new DownloadDiscoveryResult { - TotalParts = 1, + TotalParts = _discoveredPartCount, ObjectSize = totalContentLength, InitialResponse = firstRangeResponse // Keep response with stream }; } - - // SEP Ranged GET Step 4: "If they do not match, it indicates there are more parts available - // to download. Add a validation to verify that ContentLength equals to the targetPartSizeBytes." - if (firstRangeResponse.ContentLength != targetPartSize) + catch { - throw new InvalidOperationException( - $"Expected first part size {targetPartSize} bytes, but received {firstRangeResponse.ContentLength} bytes. " + - $"Total object size is {totalContentLength} bytes."); + // On error, release semaphore and dispose response before rethrowing + _httpConcurrencySlots.Release(); + firstRangeResponse?.Dispose(); + throw; } - - // SEP Ranged GET Step 5: "calculate number of requests required by performing integer division - // of total contentLength/targetPartSizeBytes. Save the number of ranged GET requests in a variable." 
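A worked instance of the ceiling division used for the part count (values are illustrative, matching the 50MB example in the surrounding comments):

    long totalContentLength = 52428800;       // 50 MB object
    long targetPartSize = 8 * 1024 * 1024;    // 8 MB target part size
    int partCount = (int)Math.Ceiling((double)totalContentLength / targetPartSize);
    // partCount == 7: six full 8 MB parts plus a final 2 MB part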
- _discoveredPartCount = (int)Math.Ceiling((double)totalContentLength / targetPartSize); - - // SEP Ranged GET Step 9 will use this response for creating DownloadResponse - // Keep the response with its stream (will be buffered in StartDownloadsAsync) - return new DownloadDiscoveryResult - { - TotalParts = _discoveredPartCount, - ObjectSize = totalContentLength, - InitialResponse = firstRangeResponse // Keep response with stream - }; } private GetObjectRequest CreateGetObjectRequest() diff --git a/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs b/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs index 7ea1c89af832..fbbd1a410975 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs +++ b/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs @@ -1951,74 +1951,6 @@ public async Task DiscoverUsingRangeStrategy_CallsWaitForCapacityAsync() mockDataHandler.Verify(x => x.WaitForCapacityAsync(It.IsAny()), Times.Once); } - [TestMethod] - public async Task DiscoverUsingPartStrategy_AcquiresAndReleasesHttpSlot() - { - // Arrange - Use real SemaphoreSlim to track HTTP concurrency usage - var httpThrottler = new SemaphoreSlim(2, 2); // 2 concurrent requests max - var initialCount = httpThrottler.CurrentCount; - - var mockDataHandler = CreateMockDataHandler(); - var mockResponse = MultipartDownloadTestHelpers.CreateMultipartFirstPartResponse( - 8 * 1024 * 1024, 3, 24 * 1024 * 1024, "test-etag"); - - var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( - (req, ct) => Task.FromResult(mockResponse)); - - var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( - downloadType: MultipartDownloadType.PART); - var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); - - // Use shared HTTP throttler to track usage - var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object, null, httpThrottler); - - // Act - var result = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); - - // Assert - Assert.IsNotNull(result); - Assert.AreEqual(initialCount, httpThrottler.CurrentCount, - "HTTP concurrency slot should be released after discovery completes"); - - // Cleanup - httpThrottler.Dispose(); - } - - [TestMethod] - public async Task DiscoverUsingRangeStrategy_AcquiresAndReleasesHttpSlot() - { - // Arrange - Use real SemaphoreSlim to track HTTP concurrency usage - var httpThrottler = new SemaphoreSlim(2, 2); // 2 concurrent requests max - var initialCount = httpThrottler.CurrentCount; - - var mockDataHandler = CreateMockDataHandler(); - var totalObjectSize = 52428800; // 50MB - var partSize = 8388608; // 8MB - var mockResponse = MultipartDownloadTestHelpers.CreateRangeResponse( - 0, partSize - 1, totalObjectSize, "test-etag"); - - var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( - (req, ct) => Task.FromResult(mockResponse)); - - var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( - partSize: partSize, - downloadType: MultipartDownloadType.RANGE); - var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); - - // Use shared HTTP throttler to track usage - var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object, null, httpThrottler); - - // Act - var result = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); - - // Assert - Assert.IsNotNull(result); - Assert.AreEqual(initialCount, 
httpThrottler.CurrentCount, - "HTTP concurrency slot should be released after discovery completes"); - - // Cleanup - httpThrottler.Dispose(); - } [TestMethod] public async Task MultipleDownloads_WithSharedHttpThrottler_RespectsLimits() @@ -2044,16 +1976,22 @@ public async Task MultipleDownloads_WithSharedHttpThrottler_RespectsLimits() var coordinator1 = new MultipartDownloadManager(mockClient1.Object, request1, config, mockDataHandler1.Object, null, sharedThrottler); var coordinator2 = new MultipartDownloadManager(mockClient2.Object, request2, config, mockDataHandler2.Object, null, sharedThrottler); - // Act - Start both discoveries concurrently - var task1 = coordinator1.DiscoverDownloadStrategyAsync(CancellationToken.None); - var task2 = coordinator2.DiscoverDownloadStrategyAsync(CancellationToken.None); + var discovery1 = await coordinator1.DiscoverDownloadStrategyAsync(CancellationToken.None); + await coordinator1.StartDownloadsAsync(discovery1, null, CancellationToken.None); + + var discovery2 = await coordinator2.DiscoverDownloadStrategyAsync(CancellationToken.None); + await coordinator2.StartDownloadsAsync(discovery2, null, CancellationToken.None); - await Task.WhenAll(task1, task2); + // Wait for all background work to complete + await Task.WhenAll( + coordinator1.DownloadCompletionTask, + coordinator2.DownloadCompletionTask + ); - // Assert - Both should complete successfully despite shared throttler limits - Assert.IsNotNull(task1.Result); - Assert.IsNotNull(task2.Result); - Assert.AreEqual(1, sharedThrottler.CurrentCount, "HTTP throttler should be fully released"); + // Assert - Both should complete successfully and semaphore should be fully released + Assert.IsNotNull(discovery1); + Assert.IsNotNull(discovery2); + Assert.AreEqual(1, sharedThrottler.CurrentCount, "HTTP throttler should be fully released after complete download lifecycle"); // Cleanup coordinator1.Dispose(); @@ -2230,6 +2168,544 @@ public async Task Discovery_SinglePart_StillCallsCapacityCheck() #endregion + #region Concurrency Control Tests + + [TestMethod] + public async Task HttpSemaphore_HeldThroughProcessPartAsync() + { + // Arrange - Test that HTTP semaphore is NOT released until ProcessPartAsync completes + var totalParts = 2; + var partSize = 8 * 1024 * 1024; + var totalObjectSize = totalParts * partSize; + + // Use our own semaphore to monitor its state + var concurrentRequests = 1; + var httpSemaphore = new SemaphoreSlim(concurrentRequests, concurrentRequests); + + var part1EnteredProcessPart = new TaskCompletionSource(); + var part1CanExitProcessPart = new TaskCompletionSource(); + var semaphoreWasReleasedDuringPart1 = false; + + var mockDataHandler = new Mock(); + + mockDataHandler + .Setup(x => x.WaitForCapacityAsync(It.IsAny())) + .Returns(Task.CompletedTask); + + mockDataHandler + .Setup(x => x.ProcessPartAsync(It.IsAny(), It.IsAny(), It.IsAny())) + .Returns(async (partNum, response, ct) => + { + if (partNum == 1) + { + // Part 1 enters ProcessPartAsync + part1EnteredProcessPart.SetResult(true); + + // Check if semaphore has been released (it shouldn't be with the fix!) 
+ if (httpSemaphore.CurrentCount > 0) + { + semaphoreWasReleasedDuringPart1 = true; + } + + // Block Part 1 here so we can observe semaphore state + await part1CanExitProcessPart.Task; + } + }); + + mockDataHandler + .Setup(x => x.OnDownloadComplete(It.IsAny())); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( + totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: MultipartDownloadType.PART); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration( + concurrentRequests: concurrentRequests); + + // Pass in our instrumented semaphore + var coordinator = new MultipartDownloadManager( + mockClient.Object, request, config, mockDataHandler.Object, null, httpSemaphore); + + var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + + // Act + var startTask = coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); + + // Wait for Part 1 to enter ProcessPartAsync + await part1EnteredProcessPart.Task; + + // Check semaphore state while Part 1 is in ProcessPartAsync + var semaphoreAvailableDuringProcessing = httpSemaphore.CurrentCount > 0; + + // Release Part 1 to continue + part1CanExitProcessPart.SetResult(true); + + await startTask; + await coordinator.DownloadCompletionTask; + + // Assert - This is the deterministic test of the fix + Assert.IsFalse(semaphoreAvailableDuringProcessing, + "HTTP semaphore should NOT be released while ProcessPartAsync is executing. " + + "Before fix semaphore.CurrentCount would be > 0 (released early). " + + "After fix: semaphore.CurrentCount should be 0 (held through ProcessPartAsync)."); + + Assert.IsFalse(semaphoreWasReleasedDuringPart1, + "Semaphore should not have been released at any point during Part 1 ProcessPartAsync execution"); + + // Cleanup + httpSemaphore.Dispose(); + } + + [TestMethod] + public async Task HttpSemaphore_RangeStrategy_HeldThroughProcessPartAsync() + { + // Arrange - Test that RANGE strategy also holds semaphore through ProcessPartAsync + var totalObjectSize = 17 * 1024 * 1024; // 17MB -> 3 parts @ 8MB + var partSize = 8 * 1024 * 1024; + + var concurrentRequests = 1; + var httpSemaphore = new SemaphoreSlim(concurrentRequests, concurrentRequests); + + var part1EnteredProcessPart = new TaskCompletionSource(); + var part1CanExitProcessPart = new TaskCompletionSource(); + + var mockDataHandler = new Mock(); + + mockDataHandler + .Setup(x => x.WaitForCapacityAsync(It.IsAny())) + .Returns(Task.CompletedTask); + + mockDataHandler + .Setup(x => x.ProcessPartAsync(It.IsAny(), It.IsAny(), It.IsAny())) + .Returns(async (partNum, response, ct) => + { + if (partNum == 1) + { + part1EnteredProcessPart.SetResult(true); + await part1CanExitProcessPart.Task; + } + }); + + mockDataHandler + .Setup(x => x.OnDownloadComplete(It.IsAny())); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( + 3, partSize, totalObjectSize, "test-etag", usePartStrategy: false); // RANGE strategy + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + partSize: partSize, + downloadType: MultipartDownloadType.RANGE); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration( + concurrentRequests: concurrentRequests); + + var coordinator = new MultipartDownloadManager( + mockClient.Object, request, config, mockDataHandler.Object, null, httpSemaphore); + + var discoveryResult = await 
coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + + // Act + var startTask = coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); + await part1EnteredProcessPart.Task; + + // Check semaphore state while Part 1 is in ProcessPartAsync + var semaphoreAvailableDuringProcessing = httpSemaphore.CurrentCount > 0; + + part1CanExitProcessPart.SetResult(true); + await startTask; + await coordinator.DownloadCompletionTask; + + // Assert + Assert.IsFalse(semaphoreAvailableDuringProcessing, + "RANGE strategy should also hold HTTP semaphore through ProcessPartAsync"); + + // Cleanup + httpSemaphore.Dispose(); + } + + #endregion + + #region Semaphore Release Error Path Tests + + [TestMethod] + public async Task StartDownloadsAsync_PrepareAsyncFails_ReleasesHttpSemaphore() + { + // Arrange - PrepareAsync fails but semaphore was acquired during discovery + var httpThrottler = new SemaphoreSlim(2, 2); + var initialCount = httpThrottler.CurrentCount; + + var mockDataHandler = new Mock(); + + // WaitForCapacityAsync succeeds (buffer space available) + mockDataHandler + .Setup(x => x.WaitForCapacityAsync(It.IsAny())) + .Returns(Task.CompletedTask); + + // PrepareAsync fails BEFORE Part 1 processing + mockDataHandler + .Setup(x => x.PrepareAsync(It.IsAny(), It.IsAny())) + .ThrowsAsync(new InvalidOperationException("Simulated prepare failure")); + + var mockResponse = MultipartDownloadTestHelpers.CreateMultipartFirstPartResponse( + 8 * 1024 * 1024, 2, 16 * 1024 * 1024, "test-etag"); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: MultipartDownloadType.PART); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager( + mockClient.Object, request, config, mockDataHandler.Object, null, httpThrottler); + + var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + + // After discovery, semaphore should have 1 slot held (2 total - 1 used = 1 available) + Assert.AreEqual(initialCount - 1, httpThrottler.CurrentCount, + "After discovery, semaphore should have 1 slot held"); + + // Act & Assert + try + { + await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); + Assert.Fail("Expected InvalidOperationException to be thrown"); + } + catch (InvalidOperationException ex) + { + Assert.AreEqual("Simulated prepare failure", ex.Message); + } + + Assert.AreEqual(initialCount, httpThrottler.CurrentCount, + "HTTP semaphore should be released when PrepareAsync fails"); + + // Cleanup + httpThrottler.Dispose(); + } + + [TestMethod] + public async Task StartDownloadsAsync_Part1ProcessingFails_ReleasesHttpSemaphore() + { + // Arrange - Test that finally block correctly releases semaphore when Part 1 processing fails + var httpThrottler = new SemaphoreSlim(2, 2); + var initialCount = httpThrottler.CurrentCount; + + var mockDataHandler = new Mock(); + + // WaitForCapacityAsync succeeds + mockDataHandler + .Setup(x => x.WaitForCapacityAsync(It.IsAny())) + .Returns(Task.CompletedTask); + + // PrepareAsync succeeds + mockDataHandler + .Setup(x => x.PrepareAsync(It.IsAny(), It.IsAny())) + .Returns(Task.CompletedTask); + + // ProcessPartAsync fails for Part 1 + mockDataHandler + .Setup(x => x.ProcessPartAsync(1, It.IsAny(), It.IsAny())) + .ThrowsAsync(new InvalidOperationException("Simulated Part 1 
processing failure")); + + var mockResponse = MultipartDownloadTestHelpers.CreateMultipartFirstPartResponse( + 8 * 1024 * 1024, 2, 16 * 1024 * 1024, "test-etag"); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: MultipartDownloadType.PART); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager( + mockClient.Object, request, config, mockDataHandler.Object, null, httpThrottler); + + var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + + // After discovery, semaphore should have 1 slot held + Assert.AreEqual(initialCount - 1, httpThrottler.CurrentCount, + "After discovery, semaphore should have 1 slot held"); + + // Act & Assert + try + { + await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); + Assert.Fail("Expected InvalidOperationException to be thrown"); + } + catch (InvalidOperationException ex) + { + Assert.AreEqual("Simulated Part 1 processing failure", ex.Message); + } + + // Assert - Finally block should release semaphore + Assert.AreEqual(initialCount, httpThrottler.CurrentCount, + "HTTP semaphore should be released by finally block when Part 1 processing fails"); + + // Cleanup + httpThrottler.Dispose(); + } + + [TestMethod] + public async Task Discovery_WaitForCapacityFails_DoesNotReleaseHttpSemaphore() + { + // Arrange - Test that semaphore is NOT released when it was never acquired + var httpThrottler = new SemaphoreSlim(2, 2); + var initialCount = httpThrottler.CurrentCount; + + var mockDataHandler = new Mock(); + + // WaitForCapacityAsync fails BEFORE HTTP semaphore is acquired + mockDataHandler + .Setup(x => x.WaitForCapacityAsync(It.IsAny())) + .ThrowsAsync(new InvalidOperationException("Simulated capacity wait failure")); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager( + mockClient.Object, request, config, mockDataHandler.Object, null, httpThrottler); + + // Act & Assert + try + { + await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + Assert.Fail("Expected InvalidOperationException to be thrown"); + } + catch (InvalidOperationException ex) + { + Assert.AreEqual("Simulated capacity wait failure", ex.Message); + } + + // Assert - Semaphore should NOT be released (it was never acquired) + Assert.AreEqual(initialCount, httpThrottler.CurrentCount, + "HTTP semaphore should NOT be released when it was never acquired (failed before WaitAsync)"); + + // Cleanup + httpThrottler.Dispose(); + } + + [TestMethod] + public async Task StartDownloadsAsync_BackgroundPartHttpFails_ReleasesHttpSemaphore() + { + // Arrange - Test that background part download failures properly release semaphore + var totalParts = 3; + var partSize = 8 * 1024 * 1024; + var totalObjectSize = totalParts * partSize; + + var httpThrottler = new SemaphoreSlim(2, 2); + var initialCount = httpThrottler.CurrentCount; + + var mockDataHandler = new Mock(); + + // WaitForCapacityAsync succeeds for all parts + mockDataHandler + .Setup(x => x.WaitForCapacityAsync(It.IsAny())) + .Returns(Task.CompletedTask); + + // PrepareAsync succeeds + mockDataHandler + .Setup(x => 
x.PrepareAsync(It.IsAny(), It.IsAny())) + .Returns(Task.CompletedTask); + + // ProcessPartAsync succeeds for Part 1, but not called for Part 2 (HTTP fails first) + mockDataHandler + .Setup(x => x.ProcessPartAsync(1, It.IsAny(), It.IsAny())) + .Returns(Task.CompletedTask); + + // ReleaseCapacity is called on failure + mockDataHandler + .Setup(x => x.ReleaseCapacity()); + + mockDataHandler + .Setup(x => x.OnDownloadComplete(It.IsAny())); + + var callCount = 0; + var mockClient = new Mock(); + mockClient.Setup(x => x.GetObjectAsync(It.IsAny(), It.IsAny())) + .Returns(() => + { + callCount++; + if (callCount == 1) + { + // Discovery call succeeds + return Task.FromResult(MultipartDownloadTestHelpers.CreateMultipartFirstPartResponse( + partSize, totalParts, totalObjectSize, "test-etag")); + } + else + { + // Background part HTTP request fails + throw new InvalidOperationException("Simulated HTTP failure for background part"); + } + }); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: MultipartDownloadType.PART); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1); + var coordinator = new MultipartDownloadManager( + mockClient.Object, request, config, mockDataHandler.Object, null, httpThrottler); + + var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + + // After discovery, semaphore should have 1 slot held (for Part 1) + Assert.AreEqual(initialCount - 1, httpThrottler.CurrentCount, + "After discovery, semaphore should have 1 slot held"); + + // Act + await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); + + // Wait for background task to fail + try + { + await coordinator.DownloadCompletionTask; + } + catch (InvalidOperationException) + { + // Expected failure from background task + } + + // Assert - Semaphore should be fully released (Part 1 released in StartDownloadsAsync, + // Parts 2 and 3 released in CreateDownloadTaskAsync catch blocks) + Assert.AreEqual(initialCount, httpThrottler.CurrentCount, + "HTTP semaphore should be fully released after background part HTTP failure"); + + // Verify ReleaseCapacity was called twice (once for Part 2 that failed, once for Part 3 that got cancelled) + // With sequential capacity acquisition, Part 3 acquired capacity before Part 2's HTTP call failed + mockDataHandler.Verify(x => x.ReleaseCapacity(), Times.Exactly(2), + "ReleaseCapacity should be called for both Part 2 (failed) and Part 3 (cancelled after acquiring capacity)"); + + // Cleanup + httpThrottler.Dispose(); + } + + [TestMethod] + public async Task StartDownloadsAsync_BackgroundPartProcessingFails_ReleasesHttpSemaphore() + { + // Arrange - Test that background part ProcessPartAsync failures properly release semaphore + var totalParts = 3; + var partSize = 8 * 1024 * 1024; + var totalObjectSize = totalParts * partSize; + + var httpThrottler = new SemaphoreSlim(2, 2); + var initialCount = httpThrottler.CurrentCount; + + var mockDataHandler = new Mock(); + + // WaitForCapacityAsync succeeds for all parts + mockDataHandler + .Setup(x => x.WaitForCapacityAsync(It.IsAny())) + .Returns(Task.CompletedTask); + + // PrepareAsync succeeds + mockDataHandler + .Setup(x => x.PrepareAsync(It.IsAny(), It.IsAny())) + .Returns(Task.CompletedTask); + + // ProcessPartAsync succeeds for Part 1, fails for Part 2 + var processCallCount = 0; + mockDataHandler + .Setup(x => x.ProcessPartAsync(It.IsAny(), It.IsAny(), It.IsAny())) + .Returns((partNum, 
response, ct) => + { + processCallCount++; + if (partNum == 1) + { + return Task.CompletedTask; // Part 1 succeeds + } + throw new InvalidOperationException($"Simulated processing failure for Part {partNum}"); + }); + + // ReleaseCapacity is called on failure + mockDataHandler + .Setup(x => x.ReleaseCapacity()); + + mockDataHandler + .Setup(x => x.OnDownloadComplete(It.IsAny())); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( + totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: MultipartDownloadType.PART); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1); + var coordinator = new MultipartDownloadManager( + mockClient.Object, request, config, mockDataHandler.Object, null, httpThrottler); + + var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + + // After discovery, semaphore should have 1 slot held + Assert.AreEqual(initialCount - 1, httpThrottler.CurrentCount, + "After discovery, semaphore should have 1 slot held"); + + // Act + await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); + + // Wait for background task to fail + try + { + await coordinator.DownloadCompletionTask; + } + catch (InvalidOperationException) + { + // Expected failure from background task + } + + // Assert - Semaphore should be fully released + Assert.AreEqual(initialCount, httpThrottler.CurrentCount, + "HTTP semaphore should be fully released after background part processing failure"); + + // Verify ReleaseCapacity was called twice (once for Part 2 that failed, once for Part 3 that may have continued) + // With sequential capacity acquisition, Part 3 acquired capacity before Part 2's processing failed + mockDataHandler.Verify(x => x.ReleaseCapacity(), Times.Exactly(2), + "ReleaseCapacity should be called for both Part 2 (failed) and Part 3 (cancelled/failed after acquiring capacity)"); + + // Cleanup + httpThrottler.Dispose(); + } + + [TestMethod] + public async Task Discovery_HttpRequestAfterCapacityFails_ReleasesHttpSemaphore() + { + // Arrange - Test semaphore release when HTTP request fails after capacity is acquired + var httpThrottler = new SemaphoreSlim(2, 2); + var initialCount = httpThrottler.CurrentCount; + + var mockDataHandler = new Mock(); + + // WaitForCapacityAsync succeeds (capacity acquired) + mockDataHandler + .Setup(x => x.WaitForCapacityAsync(It.IsAny())) + .Returns(Task.CompletedTask); + + // HTTP request fails AFTER both capacity types are acquired + var mockClient = new Mock(); + mockClient + .Setup(x => x.GetObjectAsync(It.IsAny(), It.IsAny())) + .ThrowsAsync(new InvalidOperationException("Simulated S3 failure after capacity acquired")); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager( + mockClient.Object, request, config, mockDataHandler.Object, null, httpThrottler); + + // Act & Assert + try + { + await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + Assert.Fail("Expected InvalidOperationException to be thrown"); + } + catch (InvalidOperationException ex) + { + Assert.AreEqual("Simulated S3 failure after capacity acquired", ex.Message); + } + + // Assert - HTTP semaphore should be released by catch block in discovery + 
Assert.AreEqual(initialCount, httpThrottler.CurrentCount, + "HTTP semaphore should be released when HTTP request fails in discovery"); + + // Cleanup + httpThrottler.Dispose(); + } + + #endregion + #region ContentRange and Part Range Calculation Tests [TestMethod] From 26431080ede93af49679085725ec0e8b3af6487c Mon Sep 17 00:00:00 2001 From: Garrett Beatty Date: Thu, 11 Dec 2025 09:28:05 -0500 Subject: [PATCH 49/56] optimize task creation (#4219) --- .../Internal/MultipartDownloadManager.cs | 78 +++++- .../Custom/MultipartDownloadManagerTests.cs | 256 ++++++++++++++++++ 2 files changed, 319 insertions(+), 15 deletions(-) diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs index 010243c8c7bd..939cedffad55 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs @@ -379,19 +379,38 @@ public async Task StartDownloadsAsync(DownloadDiscoveryResult discoveryResult, E _logger.DebugFormat("MultipartDownloadManager: [Part {0}] Waiting for buffer space", partNum); // Acquire capacity sequentially - guarantees Part 2 before Part 3, etc. - await _dataHandler.WaitForCapacityAsync(cancellationToken).ConfigureAwait(false); + await _dataHandler.WaitForCapacityAsync(internalCts.Token).ConfigureAwait(false); _logger.DebugFormat("MultipartDownloadManager: [Part {0}] Buffer space acquired", partNum); - var task = CreateDownloadTaskAsync(partNum, discoveryResult.ObjectSize, wrappedCallback, internalCts.Token); - downloadTasks.Add(task); + _logger.DebugFormat("MultipartDownloadManager: [Part {0}] Waiting for HTTP concurrency slot (Available: {1}/{2})", + partNum, _httpConcurrencySlots.CurrentCount, _config.ConcurrentServiceRequests); + + // Acquire HTTP slot in the loop before creating task + // Loop will block here if all slots are in use + await _httpConcurrencySlots.WaitAsync(internalCts.Token).ConfigureAwait(false); + + _logger.DebugFormat("MultipartDownloadManager: [Part {0}] HTTP concurrency slot acquired", partNum); + + try + { + var task = CreateDownloadTaskAsync(partNum, discoveryResult.ObjectSize, wrappedCallback, internalCts.Token); + downloadTasks.Add(task); + } + catch (Exception ex) + { + // If task creation fails, release the HTTP slot we just acquired + _httpConcurrencySlots.Release(); + _logger.DebugFormat("MultipartDownloadManager: [Part {0}] HTTP concurrency slot released due to task creation failure: {1}", partNum, ex); + throw; + } } var expectedTaskCount = downloadTasks.Count; _logger.DebugFormat("MultipartDownloadManager: Background task waiting for {0} download tasks", expectedTaskCount); // Wait for all downloads to complete (fails fast on first exception) - await TaskHelpers.WhenAllOrFirstExceptionAsync(downloadTasks, cancellationToken).ConfigureAwait(false); + await TaskHelpers.WhenAllOrFirstExceptionAsync(downloadTasks, internalCts.Token).ConfigureAwait(false); _logger.DebugFormat("MultipartDownloadManager: All download tasks completed successfully"); @@ -418,7 +437,27 @@ public async Task StartDownloadsAsync(DownloadDiscoveryResult discoveryResult, E catch (Exception ex) { _downloadException = ex; - _logger.Error(ex, "MultipartDownloadManager: Background download task failed"); + + + + // Cancel all remaining downloads immediately to prevent cascading timeout errors + // This ensures that when one part fails, other tasks stop gracefully instead of + // continuing until 
they hit their own timeout/cancellation errors + // Check if cancellation was already requested to avoid ObjectDisposedException + if (!internalCts.IsCancellationRequested) + { + try + { + internalCts.Cancel(); + _logger.DebugFormat("MultipartDownloadManager: Cancelled all in-flight downloads due to error"); + } + catch (ObjectDisposedException) + { + // CancellationTokenSource was already disposed, ignore + _logger.DebugFormat("MultipartDownloadManager: CancellationTokenSource already disposed during cancellation"); + } + } + _dataHandler.OnDownloadComplete(ex); throw; } @@ -440,6 +479,22 @@ public async Task StartDownloadsAsync(DownloadDiscoveryResult discoveryResult, E _downloadException = ex; _logger.Error(ex, "MultipartDownloadManager: Download failed"); + // Cancel all remaining downloads immediately to prevent cascading timeout errors + // Check if cancellation was already requested to avoid ObjectDisposedException + if (!internalCts.IsCancellationRequested) + { + try + { + internalCts.Cancel(); + _logger.DebugFormat("MultipartDownloadManager: Cancelled all in-flight downloads due to error"); + } + catch (ObjectDisposedException) + { + // CancellationTokenSource was already disposed, ignore + _logger.DebugFormat("MultipartDownloadManager: CancellationTokenSource already disposed during cancellation"); + } + } + _dataHandler.OnDownloadComplete(ex); // Dispose the CancellationTokenSource if background task was never started @@ -459,15 +514,8 @@ private async Task CreateDownloadTaskAsync(int partNumber, long objectSize, Even try { - _logger.DebugFormat("MultipartDownloadManager: [Part {0}] Waiting for HTTP concurrency slot (Available: {1}/{2})", - partNumber, _httpConcurrencySlots.CurrentCount, _config.ConcurrentServiceRequests); - - // Limit HTTP concurrency for both network download AND disk write - // The semaphore is held until AFTER ProcessPartAsync completes to ensure - // ConcurrentServiceRequests controls the entire I/O operation - await _httpConcurrencySlots.WaitAsync(cancellationToken).ConfigureAwait(false); - - _logger.DebugFormat("MultipartDownloadManager: [Part {0}] HTTP concurrency slot acquired", partNumber); + // HTTP slot was already acquired in the for loop before this task was created + // We just need to use it and release it when done try { @@ -544,7 +592,7 @@ private async Task CreateDownloadTaskAsync(int partNumber, long objectSize, Even finally { // Release semaphore after BOTH network download AND disk write complete - // This ensures ConcurrentServiceRequests limits the entire I/O operation + // Slot was acquired in the for loop before this task was created _httpConcurrencySlots.Release(); _logger.DebugFormat("MultipartDownloadManager: [Part {0}] HTTP concurrency slot released (Available: {1}/{2})", partNumber, _httpConcurrencySlots.CurrentCount, _config.ConcurrentServiceRequests); diff --git a/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs b/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs index fbbd1a410975..ecf90cbc4d67 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs +++ b/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs @@ -3337,5 +3337,261 @@ public async Task ProgressCallback_MultiplePartsComplete_AggregatesCorrectly() } #endregion + + #region Cancellation Enhancement Tests + + [TestMethod] + public async Task StartDownloadsAsync_BackgroundPartFails_CancelsInternalToken() + { + // Arrange - Deterministic test using TaskCompletionSource to control 
execution order + // This ensures Part 3 waits at synchronization point, Part 2 fails, then Part 3 checks cancellation + var totalParts = 3; + var partSize = 8 * 1024 * 1024; + var totalObjectSize = totalParts * partSize; + + var part2Failed = false; + var part3SawCancellation = false; + + // Synchronization primitives to control execution order + var part3ReachedSyncPoint = new TaskCompletionSource(); + var part2CanFail = new TaskCompletionSource(); + var part3CanCheckCancellation = new TaskCompletionSource(); + + var mockDataHandler = new Mock(); + + // Capacity acquisition succeeds for all parts + mockDataHandler + .Setup(x => x.WaitForCapacityAsync(It.IsAny())) + .Returns(Task.CompletedTask); + + // PrepareAsync succeeds + mockDataHandler + .Setup(x => x.PrepareAsync(It.IsAny(), It.IsAny())) + .Returns(Task.CompletedTask); + + // ProcessPartAsync: Controlled execution order using TaskCompletionSource + mockDataHandler + .Setup(x => x.ProcessPartAsync(It.IsAny(), It.IsAny(), It.IsAny())) + .Returns(async (partNum, response, ct) => + { + if (partNum == 1) + { + return; // Part 1 succeeds immediately + } + else if (partNum == 2) + { + // Part 2 waits for Part 3 to reach sync point before failing + await part2CanFail.Task; + part2Failed = true; + throw new InvalidOperationException("Simulated Part 2 failure"); + } + else // Part 3 + { + // Part 3 reaches sync point and signals to Part 2 + part3ReachedSyncPoint.SetResult(true); + + // Wait for Part 2 to fail and cancellation to propagate + await part3CanCheckCancellation.Task; + + // Now check if cancellation was received from internalCts + if (ct.IsCancellationRequested) + { + part3SawCancellation = true; + throw new OperationCanceledException("Part 3 cancelled due to Part 2 failure"); + } + } + }); + + mockDataHandler.Setup(x => x.ReleaseCapacity()); + mockDataHandler.Setup(x => x.OnDownloadComplete(It.IsAny())); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( + totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: MultipartDownloadType.PART); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 2); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object); + + var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + + // Act - Start downloads + await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); + + // Wait for Part 3 to reach synchronization point + await part3ReachedSyncPoint.Task; + + // Allow Part 2 to fail + part2CanFail.SetResult(true); + + // Give cancellation time to propagate + await Task.Delay(100); + + // Allow Part 3 to check cancellation + part3CanCheckCancellation.SetResult(true); + + // Wait for background task to complete + try + { + await coordinator.DownloadCompletionTask; + } + catch (InvalidOperationException) + { + // Expected failure from Part 2 + } + + // Assert - Deterministic verification that cancellation propagated + Assert.IsTrue(part2Failed, "Part 2 should have failed"); + Assert.IsTrue(part3SawCancellation, + "Part 3 should have received cancellation via internalCts.Token (deterministic with TaskCompletionSource)"); + + Assert.IsNotNull(coordinator.DownloadException, + "Download exception should be captured when background part fails"); + Assert.IsInstanceOfType(coordinator.DownloadException, 
typeof(InvalidOperationException), + "Download exception should be the Part 2 failure"); + } + + [TestMethod] + public async Task StartDownloadsAsync_MultiplePartsFail_HandlesGracefully() + { + // Arrange - Test simultaneous failures from multiple parts + var totalParts = 4; + var partSize = 8 * 1024 * 1024; + var totalObjectSize = totalParts * partSize; + + var failedParts = new System.Collections.Concurrent.ConcurrentBag(); + var mockDataHandler = new Mock(); + + mockDataHandler + .Setup(x => x.WaitForCapacityAsync(It.IsAny())) + .Returns(Task.CompletedTask); + + mockDataHandler + .Setup(x => x.PrepareAsync(It.IsAny(), It.IsAny())) + .Returns(Task.CompletedTask); + + // Part 1 succeeds, Parts 2, 3, 4 all fail + mockDataHandler + .Setup(x => x.ProcessPartAsync(It.IsAny(), It.IsAny(), It.IsAny())) + .Returns((partNum, response, ct) => + { + if (partNum == 1) + { + return Task.CompletedTask; + } + + failedParts.Add(partNum); + throw new InvalidOperationException($"Simulated Part {partNum} failure"); + }); + + mockDataHandler.Setup(x => x.ReleaseCapacity()); + mockDataHandler.Setup(x => x.OnDownloadComplete(It.IsAny())); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( + totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: MultipartDownloadType.PART); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 3); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object); + + var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + + // Act + await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); + + try + { + await coordinator.DownloadCompletionTask; + } + catch (InvalidOperationException) + { + // Expected - at least one part failed + } + + // Assert - Should handle multiple failures gracefully + Assert.IsTrue(failedParts.Count > 0, "At least one part should have failed"); + Assert.IsNotNull(coordinator.DownloadException, "Download exception should be captured"); + } + + [TestMethod] + public async Task StartDownloadsAsync_CancellationRacesWithDispose_HandlesGracefully() + { + // Arrange - Test race condition between Cancel() and Dispose() + var totalParts = 3; + var partSize = 8 * 1024 * 1024; + var totalObjectSize = totalParts * partSize; + + var objectDisposedExceptionCaught = false; + var mockDataHandler = new Mock(); + + mockDataHandler + .Setup(x => x.WaitForCapacityAsync(It.IsAny())) + .Returns(Task.CompletedTask); + + mockDataHandler + .Setup(x => x.PrepareAsync(It.IsAny(), It.IsAny())) + .Returns(Task.CompletedTask); + + // Part 1 succeeds, Part 2 fails triggering cancellation + mockDataHandler + .Setup(x => x.ProcessPartAsync(It.IsAny(), It.IsAny(), It.IsAny())) + .Returns((partNum, response, ct) => + { + if (partNum == 1) + { + return Task.CompletedTask; + } + + // Part 2 failure will trigger Cancel() in catch block + // The enhancement should check IsCancellationRequested to avoid ObjectDisposedException + throw new InvalidOperationException("Simulated Part 2 failure"); + }); + + mockDataHandler.Setup(x => x.ReleaseCapacity()); + mockDataHandler + .Setup(x => x.OnDownloadComplete(It.IsAny())) + .Callback(ex => + { + // Check if ObjectDisposedException was handled + if (ex is ObjectDisposedException) + { + objectDisposedExceptionCaught = true; + } + }); + + var mockClient = 
MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( + totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: MultipartDownloadType.PART); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 2); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object); + + var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + + // Act + await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); + + try + { + await coordinator.DownloadCompletionTask; + } + catch (InvalidOperationException) + { + // Expected failure + } + + // Assert - The enhancement should prevent ObjectDisposedException from being thrown + // by checking IsCancellationRequested before calling Cancel() + Assert.IsFalse(objectDisposedExceptionCaught, + "ObjectDisposedException should not propagate due to IsCancellationRequested check"); + Assert.IsNotNull(coordinator.DownloadException, + "Download exception should be the original failure, not ObjectDisposedException"); + Assert.IsInstanceOfType(coordinator.DownloadException, typeof(InvalidOperationException), + "Download exception should be the original InvalidOperationException from Part 2 failure"); + } + + #endregion } } From 821729c62971f63292e87cf539142589b00c363d Mon Sep 17 00:00:00 2001 From: Garrett Beatty Date: Thu, 11 Dec 2025 09:29:24 -0500 Subject: [PATCH 50/56] use max size semaphore (#4220) --- .../Internal/BufferedPartDataHandler.cs | 3 - .../Internal/MultipartDownloadManager.cs | 5 +- .../Transfer/Internal/PartBufferManager.cs | 5 +- .../Custom/BufferedPartDataHandlerTests.cs | 144 ++++++++++- .../Custom/MultipartDownloadManagerTests.cs | 78 +++--- .../Custom/PartBufferManagerTests.cs | 239 ++++++++++++++++++ 6 files changed, 424 insertions(+), 50 deletions(-) diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedPartDataHandler.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedPartDataHandler.cs index 82d0a8f4d590..256a0228d086 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedPartDataHandler.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedPartDataHandler.cs @@ -153,9 +153,6 @@ private void ProcessStreamingPart( // If ReleaseBufferSpace() throws, we no longer own the data source, so we won't dispose it streamingDataSource = null; - // Release capacity immediately since we're not holding anything in memory - _partBufferManager.ReleaseBufferSpace(); - _logger.DebugFormat("BufferedPartDataHandler: [Part {0}] StreamingDataSource added and capacity released", partNumber); } diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs index 939cedffad55..1537d48c58da 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs @@ -159,7 +159,10 @@ public MultipartDownloadManager(IAmazonS3 s3Client, BaseDownloadRequest request, } else { - _httpConcurrencySlots = new SemaphoreSlim(_config.ConcurrentServiceRequests); + _httpConcurrencySlots = new SemaphoreSlim( + _config.ConcurrentServiceRequests, // initialCount + _config.ConcurrentServiceRequests // maxCount - prevents exceeding configured limit + ); _ownsHttpThrottler = true; // We own it, so we 
dispose it } } diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/PartBufferManager.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/PartBufferManager.cs index 16baf6644384..33edf2fa0ad1 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/PartBufferManager.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/PartBufferManager.cs @@ -199,7 +199,10 @@ public PartBufferManager(BufferedDownloadConfiguration config) throw new ArgumentNullException(nameof(config)); _partDataSources = new ConcurrentDictionary(); - _bufferSpaceAvailable = new SemaphoreSlim(config.MaxInMemoryParts); + _bufferSpaceAvailable = new SemaphoreSlim( + config.MaxInMemoryParts, // initialCount + config.MaxInMemoryParts // maxCount - prevents exceeding configured limit + ); _partAvailable = new AutoResetEvent(false); Logger.DebugFormat("PartBufferManager initialized with MaxInMemoryParts={0}", config.MaxInMemoryParts); diff --git a/sdk/test/Services/S3/UnitTests/Custom/BufferedPartDataHandlerTests.cs b/sdk/test/Services/S3/UnitTests/Custom/BufferedPartDataHandlerTests.cs index 6a98b8fbaba2..645dae927051 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/BufferedPartDataHandlerTests.cs +++ b/sdk/test/Services/S3/UnitTests/Custom/BufferedPartDataHandlerTests.cs @@ -463,10 +463,6 @@ public async Task ProcessPartAsync_InOrderVsOutOfOrder_VerifyStreamingVsBufferin // Part 3 should use buffering path (ArrayPool allocation) Assert.AreEqual(1, bufferedPartNumbers.Count, "Expected exactly 1 part to be buffered"); Assert.AreEqual(3, bufferedPartNumbers[0], "Part 3 should be buffered"); - - // Verify ReleaseBufferSpace was called for streaming path (immediate capacity release) - mockBufferManager.Verify(m => m.ReleaseBufferSpace(), Times.Once, - "Streaming path should release capacity immediately"); } finally { @@ -525,10 +521,6 @@ public async Task ProcessPartAsync_AllInOrderParts_NoBufferingAllStreaming() Assert.AreEqual(i + 1, streamingPartNumbers[i], $"Part {i + 1} should have streamed in order"); } - - // Verify capacity was released 5 times (once per streaming part) - mockBufferManager.Verify(m => m.ReleaseBufferSpace(), Times.Exactly(5), - "Capacity should be released immediately for each streaming part"); } finally { @@ -766,6 +758,142 @@ public void Dispose_MultipleCalls_IsIdempotent() #endregion + #region Semaphore Double Release Fix Tests + + [TestMethod] + public async Task ProcessPartAsync_StreamingPart_ReleasesCapacityOnlyOnce() + { + // This test verifies the fix for the double release bug in BufferedPartDataHandler. + // Before the fix: ProcessStreamingPart() called ReleaseBufferSpace() immediately after + // adding the StreamingDataSource, causing capacity to be released twice (once immediately, + // once later when the consumer finished reading the part). + // After the fix: The immediate ReleaseBufferSpace() call was removed. Capacity is released + // only once when the consumer finishes reading the part through PartBufferManager. 
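+ // In miniature (illustrative pseudocode, not the real method bodies):
+ //   before the fix: AddBuffer(streamingSource); ReleaseBufferSpace();  // slot freed now AND again when the consumer finishes
+ //   after the fix:  AddBuffer(streamingSource);                        // the consumer alone frees the slot, exactly once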
+ + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var mockBufferManager = new Mock(); + mockBufferManager.Setup(m => m.NextExpectedPartNumber).Returns(1); + + var releaseCount = 0; + mockBufferManager.Setup(m => m.ReleaseBufferSpace()) + .Callback(() => releaseCount++); + + mockBufferManager.Setup(m => m.AddBuffer(It.IsAny())); + + var handler = new BufferedPartDataHandler(mockBufferManager.Object, config); + + try + { + var response = CreateMockGetObjectResponse(512); + + // Act - Process an in-order (streaming) part + await handler.ProcessPartAsync(1, response, CancellationToken.None); + + // Assert - ReleaseBufferSpace should NOT have been called during ProcessPartAsync + // (The removed code that called it immediately has been deleted) + // Capacity will be released later by PartBufferManager when consumer finishes reading + Assert.AreEqual(0, releaseCount, + "ProcessPartAsync should not release capacity for streaming parts. " + + "Capacity is released by PartBufferManager when consumer completes reading."); + + // Verify AddBuffer was called with StreamingDataSource (streaming path taken) + mockBufferManager.Verify(m => m.AddBuffer( + It.Is(ds => ds is StreamingDataSource)), Times.Once); + } + finally + { + handler.Dispose(); + } + } + + [TestMethod] + public async Task ProcessPartAsync_BufferedPart_DoesNotReleaseCapacityImmediately() + { + // This test verifies that buffered (out-of-order) parts don't release capacity immediately. + // Capacity is released later by PartBufferManager when the consumer finishes reading the part. + + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var mockBufferManager = new Mock(); + mockBufferManager.Setup(m => m.NextExpectedPartNumber).Returns(1); + + var releaseCount = 0; + mockBufferManager.Setup(m => m.ReleaseBufferSpace()) + .Callback(() => releaseCount++); + + mockBufferManager.Setup(m => m.AddBuffer(It.IsAny())); + + var handler = new BufferedPartDataHandler(mockBufferManager.Object, config); + + try + { + var response = CreateMockGetObjectResponse(512); + + // Act - Process an out-of-order (buffered) part + await handler.ProcessPartAsync(3, response, CancellationToken.None); + + // Assert - ReleaseBufferSpace should NOT have been called + // Capacity will be released later by PartBufferManager when consumer finishes reading + Assert.AreEqual(0, releaseCount, + "ProcessPartAsync should not release capacity for buffered parts. " + + "Capacity is released by PartBufferManager when consumer completes reading."); + + // Verify AddBuffer was called with StreamPartBuffer (buffering path taken) + mockBufferManager.Verify(m => m.AddBuffer( + It.IsAny()), Times.Once); + } + finally + { + handler.Dispose(); + } + } + + [TestMethod] + public async Task ProcessPartAsync_StreamingPartError_DoesNotDoubleRelease() + { + // This test verifies that when an error occurs during streaming part processing, + // capacity is released correctly through ReleaseCapacity() without double-releasing. 
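+ // In miniature (illustrative; echoes the assertion below): when AddBuffer throws, the
+ // handler's error path must not call ReleaseBufferSpace(), since streaming parts do not
+ // hold capacity slots in BufferedPartDataHandler.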
+ + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var mockBufferManager = new Mock(); + mockBufferManager.Setup(m => m.NextExpectedPartNumber).Returns(1); + + var releaseCount = 0; + mockBufferManager.Setup(m => m.ReleaseBufferSpace()) + .Callback(() => releaseCount++); + + // Simulate error when adding buffer + mockBufferManager.Setup(m => m.AddBuffer(It.IsAny())) + .Throws(new InvalidOperationException("Test error")); + + var handler = new BufferedPartDataHandler(mockBufferManager.Object, config); + + try + { + var response = CreateMockGetObjectResponse(512); + + // Act & Assert - Should throw + await Assert.ThrowsExceptionAsync(async () => + { + await handler.ProcessPartAsync(1, response, CancellationToken.None); + }); + + // Verify ReleaseBufferSpace was NOT called during error handling + // (The old double-release bug would have called it, causing issues) + Assert.AreEqual(0, releaseCount, + "Error handling should not release capacity for streaming parts. " + + "Streaming parts don't hold capacity slots in BufferedPartDataHandler."); + } + finally + { + handler.Dispose(); + } + } + + #endregion + #region Helper Methods /// diff --git a/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs b/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs index ecf90cbc4d67..d39db70c38d3 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs +++ b/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs @@ -601,25 +601,19 @@ public async Task DiscoverUsingRangeStrategy_CalculatesPartCount() public async Task StartDownloadsAsync_SinglePart_ReturnsImmediately() { // Arrange - var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse(1024); + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); - var discoveryResult = new DownloadDiscoveryResult - { - TotalParts = 1, - ObjectSize = 1024, - InitialResponse = new GetObjectResponse() - }; - - var mockBufferManager = new Mock(); - - // Act + // Act - Call DiscoverDownloadStrategyAsync first to properly acquire HTTP semaphore + var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); - // Assert - should complete without any downloads - mockClient.Verify(x => x.GetObjectAsync(It.IsAny(), It.IsAny()), Times.Never); + // Assert - should complete without any additional downloads (discovery already made the call) + mockClient.Verify(x => x.GetObjectAsync(It.IsAny(), It.IsAny()), Times.Once); } [TestMethod] @@ -1230,22 +1224,35 @@ public async Task StartDownloadsAsync_EarlyError_DisposesCancellationTokenSource // Arrange - Test CancellationTokenSource disposal when error occurs before background task starts var mockDataHandler = new Mock(); + // WaitForCapacityAsync succeeds (needed for discovery) + mockDataHandler + .Setup(x => x.WaitForCapacityAsync(It.IsAny())) + .Returns(Task.CompletedTask); + + // ProcessPartAsync succeeds for Part 1 (discovery) + mockDataHandler + .Setup(x => x.ProcessPartAsync(1, 
It.IsAny(), It.IsAny())) + .Returns(Task.CompletedTask); + // Simulate error during PrepareAsync (before background task is created) mockDataHandler .Setup(x => x.PrepareAsync(It.IsAny(), It.IsAny())) .ThrowsAsync(new InvalidOperationException("Simulated prepare failure")); - var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); - var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var totalParts = 2; + var partSize = 8 * 1024 * 1024; + var totalObjectSize = totalParts * partSize; + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( + totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: MultipartDownloadType.PART); var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object); - var discoveryResult = new DownloadDiscoveryResult - { - TotalParts = 2, - ObjectSize = 16 * 1024 * 1024, - InitialResponse = new GetObjectResponse() - }; + // Call DiscoverDownloadStrategyAsync first to properly acquire HTTP semaphore + var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); // Act & Assert try @@ -1599,17 +1606,16 @@ public async Task StartDownloadsAsync_PassesCancellationTokenToBufferManager() public async Task StartDownloadsAsync_SinglePart_DoesNotThrowOnCancellation() { // Arrange - Single part download should return immediately without using cancellation token - var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse(1024); + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); - var discoveryResult = new DownloadDiscoveryResult - { - TotalParts = 1, - ObjectSize = 1024, - InitialResponse = new GetObjectResponse() - }; + // Call DiscoverDownloadStrategyAsync first to properly acquire HTTP semaphore + var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); var cts = new CancellationTokenSource(); cts.Cancel(); @@ -1617,8 +1623,8 @@ public async Task StartDownloadsAsync_SinglePart_DoesNotThrowOnCancellation() // Act - should complete without throwing even though token is cancelled await coordinator.StartDownloadsAsync(discoveryResult, null, cts.Token); - // Assert - no exception thrown, no S3 calls made - mockClient.Verify(x => x.GetObjectAsync(It.IsAny(), It.IsAny()), Times.Never); + // Assert - discovery already made the S3 call, StartDownloadsAsync doesn't make additional calls for single-part + mockClient.Verify(x => x.GetObjectAsync(It.IsAny(), It.IsAny()), Times.Once); } [TestMethod] @@ -1831,19 +1837,17 @@ public async Task StartDownloadsAsync_ReturnsImmediately_PreventsDeadlock() public async Task StartDownloadsAsync_SinglePart_ReturnsImmediatelyWithoutBackgroundTask() { // Arrange - Single-part downloads should not create background tasks - var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse(1024); + var mockClient = 
MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); var mockDataHandler = CreateMockDataHandler(); var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object); - var discoveryResult = new DownloadDiscoveryResult - { - TotalParts = 1, - ObjectSize = 1024, - InitialResponse = new GetObjectResponse() - }; + // Call DiscoverDownloadStrategyAsync first to properly acquire HTTP semaphore + var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); // Act var stopwatch = System.Diagnostics.Stopwatch.StartNew(); diff --git a/sdk/test/Services/S3/UnitTests/Custom/PartBufferManagerTests.cs b/sdk/test/Services/S3/UnitTests/Custom/PartBufferManagerTests.cs index 121c50f0dede..37ab24be0fb3 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/PartBufferManagerTests.cs +++ b/sdk/test/Services/S3/UnitTests/Custom/PartBufferManagerTests.cs @@ -82,6 +82,7 @@ public async Task NextExpectedPartNumber_IncrementsAfterPartComplete() // Add part 1 byte[] testBuffer = ArrayPool.Shared.Rent(512); var partBuffer = new StreamPartBuffer(1, testBuffer, 512); + await manager.WaitForBufferSpaceAsync(CancellationToken.None); manager.AddBuffer(partBuffer); // Read part 1 completely @@ -244,6 +245,8 @@ public async Task AddBuffer_CreatesBufferedDataSource() byte[] testBuffer = ArrayPool.Shared.Rent(512); var partBuffer = new StreamPartBuffer(1, testBuffer, 512); + await manager.WaitForBufferSpaceAsync(CancellationToken.None); + // Act manager.AddBuffer(partBuffer); @@ -301,6 +304,7 @@ public async Task AddBuffer_SignalsPartAvailable() // Add the part byte[] testBuffer = ArrayPool.Shared.Rent(512); var partBuffer = new StreamPartBuffer(1, testBuffer, 512); + await manager.WaitForBufferSpaceAsync(CancellationToken.None); manager.AddBuffer(partBuffer); // Assert - Read should complete @@ -331,6 +335,7 @@ public async Task AddDataSource_AddsToCollection() var dataSource = new BufferedDataSource(partBuffer); // Act + await manager.WaitForBufferSpaceAsync(CancellationToken.None); manager.AddDataSource(dataSource); // Assert - Should be able to read from part 1 @@ -415,6 +420,7 @@ public async Task ReadAsync_ReadsDataSequentially() Buffer.BlockCopy(testData, 0, testBuffer, 0, 512); var partBuffer = new StreamPartBuffer(1, testBuffer, 512); + await manager.WaitForBufferSpaceAsync(CancellationToken.None); manager.AddBuffer(partBuffer); // Act @@ -443,6 +449,7 @@ public async Task ReadAsync_AdvancesNextExpectedPartNumber() // Add part 1 byte[] testBuffer = ArrayPool.Shared.Rent(512); var partBuffer = new StreamPartBuffer(1, testBuffer, 512); + await manager.WaitForBufferSpaceAsync(CancellationToken.None); manager.AddBuffer(partBuffer); // Read part 1 completely @@ -576,6 +583,7 @@ public async Task ReadAsync_WaitsForPartAvailability() // Add the part asynchronously byte[] testBuffer = ArrayPool.Shared.Rent(512); var partBuffer = new StreamPartBuffer(1, testBuffer, 512); + await manager.WaitForBufferSpaceAsync(CancellationToken.None); manager.AddBuffer(partBuffer); // Assert - Read should complete @@ -657,6 +665,7 @@ public async Task ReadAsync_ReadingAcrossPartBoundary_FillsBuffer() byte[] testBuffer1 = ArrayPool.Shared.Rent(100); Buffer.BlockCopy(testData1, 0, testBuffer1, 0, 100); var partBuffer1 = new StreamPartBuffer(1, testBuffer1, 100); + await 
manager.WaitForBufferSpaceAsync(CancellationToken.None); manager.AddBuffer(partBuffer1); // Add Part 2 (100 bytes) @@ -664,6 +673,7 @@ public async Task ReadAsync_ReadingAcrossPartBoundary_FillsBuffer() byte[] testBuffer2 = ArrayPool.Shared.Rent(100); Buffer.BlockCopy(testData2, 0, testBuffer2, 0, 100); var partBuffer2 = new StreamPartBuffer(2, testBuffer2, 100); + await manager.WaitForBufferSpaceAsync(CancellationToken.None); manager.AddBuffer(partBuffer2); // Act - Request 150 bytes (spans both parts) @@ -704,6 +714,7 @@ public async Task ReadAsync_MultiplePartsInSingleRead_AdvancesCorrectly() byte[] testBuffer = ArrayPool.Shared.Rent(50); Buffer.BlockCopy(testData, 0, testBuffer, 0, 50); var partBuffer = new StreamPartBuffer(i, testBuffer, 50); + await manager.WaitForBufferSpaceAsync(CancellationToken.None); manager.AddBuffer(partBuffer); } @@ -733,6 +744,7 @@ public async Task ReadAsync_PartCompletes_AdvancesToNextPart() // Add part 1 byte[] testBuffer1 = ArrayPool.Shared.Rent(100); var partBuffer1 = new StreamPartBuffer(1, testBuffer1, 100); + await manager.WaitForBufferSpaceAsync(CancellationToken.None); manager.AddBuffer(partBuffer1); // Read part 1 completely @@ -745,6 +757,7 @@ public async Task ReadAsync_PartCompletes_AdvancesToNextPart() // Add part 2 byte[] testBuffer2 = ArrayPool.Shared.Rent(100); var partBuffer2 = new StreamPartBuffer(2, testBuffer2, 100); + await manager.WaitForBufferSpaceAsync(CancellationToken.None); manager.AddBuffer(partBuffer2); // Read part 2 @@ -772,6 +785,7 @@ public async Task ReadAsync_EmptyPart_ContinuesToNextPart() // Add empty part 1 byte[] testBuffer1 = ArrayPool.Shared.Rent(100); var partBuffer1 = new StreamPartBuffer(1, testBuffer1, 0); // 0 bytes + await manager.WaitForBufferSpaceAsync(CancellationToken.None); manager.AddBuffer(partBuffer1); // Add part 2 with data @@ -779,6 +793,7 @@ public async Task ReadAsync_EmptyPart_ContinuesToNextPart() byte[] testBuffer2 = ArrayPool.Shared.Rent(100); Buffer.BlockCopy(testData2, 0, testBuffer2, 0, 100); var partBuffer2 = new StreamPartBuffer(2, testBuffer2, 100); + await manager.WaitForBufferSpaceAsync(CancellationToken.None); manager.AddBuffer(partBuffer2); // Act - Try to read 100 bytes starting from part 1 @@ -959,6 +974,7 @@ public async Task AddBufferAsync_IPartDataSource_WithStreamingDataSource_AddsSuc var streamingSource = new StreamingDataSource(1, response); // Act + await manager.WaitForBufferSpaceAsync(CancellationToken.None); manager.AddBuffer(streamingSource); // Assert - Should be able to read from part 1 @@ -990,6 +1006,7 @@ public async Task AddBufferAsync_IPartDataSource_WithBufferedDataSource_AddsSucc var bufferedSource = new BufferedDataSource(partBuffer); // Act + await manager.WaitForBufferSpaceAsync(CancellationToken.None); manager.AddBuffer(bufferedSource); // Assert - Should be able to read from part 1 @@ -1054,6 +1071,7 @@ public async Task AddBufferAsync_IPartDataSource_SignalsPartAvailable() var streamingSource = new StreamingDataSource(1, response); // Act + await manager.WaitForBufferSpaceAsync(CancellationToken.None); manager.AddBuffer(streamingSource); // Assert - Read should complete @@ -1087,6 +1105,7 @@ public async Task ReadAsync_FromStreamingDataSource_ReadsCorrectly() ResponseStream = new MemoryStream(testData) }; var streamingSource = new StreamingDataSource(1, response); + await manager.WaitForBufferSpaceAsync(CancellationToken.None); manager.AddBuffer(streamingSource); // Act - Read in multiple chunks @@ -1126,6 +1145,7 @@ public async Task 
ReadAsync_FromMixedSources_ReadsSequentially() ResponseStream = new MemoryStream(testData1) }; var streamingSource = new StreamingDataSource(1, response1); + await manager.WaitForBufferSpaceAsync(CancellationToken.None); manager.AddBuffer((IPartDataSource)streamingSource); // Add buffered source for part 2 @@ -1133,6 +1153,7 @@ public async Task ReadAsync_FromMixedSources_ReadsSequentially() byte[] testBuffer2 = ArrayPool.Shared.Rent(500); Buffer.BlockCopy(testData2, 0, testBuffer2, 0, 500); var partBuffer2 = new StreamPartBuffer(2, testBuffer2, 500); + await manager.WaitForBufferSpaceAsync(CancellationToken.None); manager.AddBuffer(partBuffer2); // Act - Read across both parts @@ -1173,6 +1194,7 @@ public async Task ReadAsync_StreamingDataSource_DisposesAfterCompletion() ResponseStream = new MemoryStream(testData) }; var streamingSource = new StreamingDataSource(1, response); + await manager.WaitForBufferSpaceAsync(CancellationToken.None); manager.AddBuffer(streamingSource); // Act - Read all data @@ -1208,6 +1230,7 @@ public async Task ReadAsync_MultipleStreamingSources_ReadsSequentially() ResponseStream = new MemoryStream(testData) }; var streamingSource = new StreamingDataSource(i, response); + await manager.WaitForBufferSpaceAsync(CancellationToken.None); manager.AddBuffer(streamingSource); } @@ -1368,6 +1391,9 @@ public async Task NextExpectedPartNumber_ConcurrentReads_SeeConsistentValue() // by adding and reading parts sequentially for (int partNum = 1; partNum <= NumIncrements; partNum++) { + // Wait for buffer space before adding part + await manager.WaitForBufferSpaceAsync(CancellationToken.None); + // Add part byte[] testBuffer = ArrayPool.Shared.Rent(100); var partBuffer = new StreamPartBuffer(partNum, testBuffer, 100); @@ -1413,5 +1439,218 @@ public async Task NextExpectedPartNumber_ConcurrentReads_SeeConsistentValue() } #endregion + + #region Semaphore MaxCount Tests + + [TestMethod] + public async Task WaitForBufferSpaceAsync_WithMaxCount_DoesNotExceedConfiguredLimit() + { + // This test verifies the fix for the double release bug. + // Before the fix: SemaphoreSlim without maxCount allowed unlimited Release() calls, + // which could corrupt the semaphore state and allow more concurrent operations than configured. + // After the fix: maxCount parameter prevents exceeding MaxInMemoryParts limit. 
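+ // The semaphore behavior at issue, in miniature (illustrative sketch, not part of the test):
+ //   var unbounded = new SemaphoreSlim(3);     // extra Release() silently pushes CurrentCount past 3
+ //   var bounded   = new SemaphoreSlim(3, 3);  // maxCount supplied
+ //   bounded.Release();                        // throws SemaphoreFullException: already at maxCount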
+ + // Arrange + const int maxInMemoryParts = 3; + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(maxInMemoryParts: maxInMemoryParts); + var manager = new PartBufferManager(config); + + try + { + // Acquire all available slots + for (int i = 0; i < maxInMemoryParts; i++) + { + await manager.WaitForBufferSpaceAsync(CancellationToken.None); + } + + // Release all acquired slots + for (int i = 0; i < maxInMemoryParts; i++) + { + manager.ReleaseBufferSpace(); + } + + // Attempt to release beyond maxCount (should throw) + Assert.ThrowsException<SemaphoreFullException>(() => + { + manager.ReleaseBufferSpace(); + }, "Releasing beyond maxCount should throw SemaphoreFullException"); + + // Attempt one more release to confirm protection is consistent + Assert.ThrowsException<SemaphoreFullException>(() => + { + manager.ReleaseBufferSpace(); + }, "Second excessive release should also throw SemaphoreFullException"); + + // Act - Try to acquire slots again + var acquiredSlots = 0; + for (int i = 0; i < maxInMemoryParts + 2; i++) + { + var waitTask = manager.WaitForBufferSpaceAsync(CancellationToken.None); + if (await Task.WhenAny(waitTask, Task.Delay(100)) == waitTask) + { + acquiredSlots++; + } + else + { + break; // Task didn't complete, no more slots available + } + } + + // Assert - Should only be able to acquire maxInMemoryParts slots, not more + // With maxCount fix: Can only acquire 3 slots (respects limit) + // Without maxCount fix: Could acquire 5 slots (2 extra from double releases) + Assert.AreEqual(maxInMemoryParts, acquiredSlots, + $"Semaphore should respect maxCount={maxInMemoryParts} limit despite excessive releases"); + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + public async Task ReleaseBufferSpace_ExcessiveReleases_MaintainsSemaphoreIntegrity() + { + // This test verifies that excessive Release() calls don't corrupt semaphore state. + // The maxCount parameter ensures CurrentCount never exceeds MaxInMemoryParts.
+ + // Arrange + const int maxInMemoryParts = 5; + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(maxInMemoryParts: maxInMemoryParts); + var manager = new PartBufferManager(config); + + try + { + // Acquire half the slots + for (int i = 0; i < maxInMemoryParts / 2; i++) + { + await manager.WaitForBufferSpaceAsync(CancellationToken.None); + } + + // Release the acquired slots + for (int i = 0; i < maxInMemoryParts / 2; i++) + { + manager.ReleaseBufferSpace(); + } + + // Now semaphore should be at full capacity (maxInMemoryParts) + // Attempt to release beyond maxCount - each should throw + var excessiveReleaseCount = 0; + for (int i = 0; i < 5; i++) + { + try + { + manager.ReleaseBufferSpace(); + Assert.Fail($"Release #{i + 1} beyond maxCount should have thrown SemaphoreFullException"); + } + catch (SemaphoreFullException) + { + excessiveReleaseCount++; + } + } + + // Assert - All excessive releases should have thrown + Assert.AreEqual(5, excessiveReleaseCount, "All excessive releases should throw SemaphoreFullException"); + + // Act - Count how many slots are now available + var availableSlots = 0; + for (int i = 0; i < maxInMemoryParts * 2; i++) + { + var waitTask = manager.WaitForBufferSpaceAsync(CancellationToken.None); + if (waitTask.IsCompleted) + { + availableSlots++; + await waitTask; + } + else + { + break; + } + } + + // Assert - Should never exceed maxInMemoryParts + Assert.IsTrue(availableSlots <= maxInMemoryParts, + $"Available slots ({availableSlots}) should not exceed maxInMemoryParts ({maxInMemoryParts})"); + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + public async Task BufferCapacity_ConcurrentOperations_RespectsMaxCountLimit() + { + // This test simulates the real-world scenario where multiple parts are being + // processed concurrently, verifying that the maxCount parameter prevents + // exceeding the configured buffer capacity limit. 
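+ // The producer/consumer throttle exercised below, in miniature (names as used in this test):
+ //   await manager.WaitForBufferSpaceAsync(ct);  // producer blocks once maxInMemoryParts parts are buffered
+ //   manager.AddBuffer(partBuffer);              // buffer the part while holding the slot
+ //   manager.ReleaseBufferSpace();               // consumer-side release; a waiting producer proceeds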
+ + // Arrange + const int maxInMemoryParts = 4; + const int totalParts = 10; + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(maxInMemoryParts: maxInMemoryParts); + var manager = new PartBufferManager(config); + + try + { + var activeParts = 0; + var maxActiveParts = 0; + var lockObj = new object(); + + // Simulate concurrent part processing + var tasks = new List(); + for (int partNum = 1; partNum <= totalParts; partNum++) + { + int capturedPartNum = partNum; + tasks.Add(Task.Run(async () => + { + // Wait for buffer space (enforces maxInMemoryParts limit) + await manager.WaitForBufferSpaceAsync(CancellationToken.None); + + lock (lockObj) + { + activeParts++; + if (activeParts > maxActiveParts) + { + maxActiveParts = activeParts; + } + } + + // Simulate buffering the part + byte[] testBuffer = ArrayPool.Shared.Rent(100); + var partBuffer = new StreamPartBuffer(capturedPartNum, testBuffer, 100); + manager.AddBuffer(partBuffer); + + // Simulate some processing time + await Task.Delay(10); + + // Consumer reads the part (happens asynchronously in real scenario) + // For this test, we'll manually release after a delay + await Task.Delay(20); + + lock (lockObj) + { + activeParts--; + } + + // Release is normally done by consumer after reading part + manager.ReleaseBufferSpace(); + })); + } + + // Wait for all parts to be processed + await Task.WhenAll(tasks); + + // Assert - Should never have exceeded maxInMemoryParts + Assert.IsTrue(maxActiveParts <= maxInMemoryParts, + $"Maximum concurrent buffered parts ({maxActiveParts}) exceeded configured limit ({maxInMemoryParts})"); + } + finally + { + manager.Dispose(); + } + } + + #endregion } } From b169a958966b7f29877f6b408d597388321a079c Mon Sep 17 00:00:00 2001 From: Phil Asmar Date: Thu, 11 Dec 2025 12:01:31 -0500 Subject: [PATCH 51/56] fix retrying valid IO exceptions --- .../Transfer/Internal/AtomicFileHandler.cs | 21 ++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/AtomicFileHandler.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/AtomicFileHandler.cs index 5c9f6909c92a..70756ff10be3 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/AtomicFileHandler.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/AtomicFileHandler.cs @@ -33,6 +33,7 @@ internal class AtomicFileHandler : IDisposable { private string _tempFilePath; private bool _disposed = false; + private static readonly object _fileLock = new object(); /// /// Creates a temporary file with unique identifier for atomic operations. @@ -68,10 +69,24 @@ public string CreateTemporaryFile(string destinationPath) _tempFilePath = tempPath; return tempPath; } - catch (IOException) when (attempt < 99) + catch (IOException) { - // File exists, try again with new ID - continue; + lock (_fileLock) + { + // If the file now exists when we check immediately after the exception, + // it means another process or thread beat us to the creation (race condition). + if (File.Exists(tempPath)) + { + // File exists, try again with new ID + continue; + } + else + { + // The file does *not* exist, which means the IOException was caused by + // something else entirely (e.g., permissions, disk full, network error). + throw; // Re-throw the original exception as it was an unexpected error. 
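+ // Net effect (illustration; assumes the create call above throws IOException when the path already exists):
+ // exists-after-IOException means another writer won the race, so the loop retries with a fresh ID;
+ // any other IOException (permissions, disk full, network) surfaces unchanged.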
+ } + } } } From 2c4f2acd966224811b477de08eb3c60797d234e0 Mon Sep 17 00:00:00 2001 From: Garrett Beatty Date: Thu, 11 Dec 2025 12:23:16 -0500 Subject: [PATCH 52/56] update dev configs (#4201) --- .../19ed68ce-9f46-4e1e-a0ff-45a2b3641946.json | 6 +++--- .../252dad9f-d2a9-4d49-bff8-000924f0add4.json | 11 ----------- .../433a9a6d-b8ea-4676-b763-70711e8288e2.json | 4 ++-- .../433a9a6d-b8ea-4676-b763-70711e8288e3.json | 2 +- .../433a9a6d-b8ea-4676-b763-70711e8288e4.json | 11 ----------- .../433a9a6d-b8ea-4676-b763-70711e8288e6.json | 11 ----------- .../55fe9e14-c79e-4426-9828-deae0451d4f6.json | 2 +- .../77d980ad-8f58-4f2e-97f8-d2c8c5ba3732.json | 2 +- .../7f23582e-3225-487b-83e7-167cf17cb231.json | 11 ----------- .../7f23582e-3225-487b-83e7-167cf17cb234.json | 4 ++-- .../7f23582e-3225-487b-83e7-167cf17cb238.json | 11 ----------- .../984a2bde-687f-4ed1-b6eb-03f15b257416.json | 11 ----------- .../9d07dc1e-d82d-4f94-8700-c7b57f872042.json | 4 ++-- .../9d07dc1e-d82d-4f94-8700-c7b57f872043.json | 4 ++-- .../9d07dc1e-d82d-4f94-8700-c7b57f872044.json | 4 ++-- .../9d07dc1e-d82d-4f94-8700-c7b57f872123.json | 4 ++-- .../9d07dc1e-d82d-4f94-8700-c7b57f872124.json | 4 ++-- .../c49077d9-90b3-437f-b316-6d8d8833ae65.json | 2 +- .../c49077d9-90b3-437f-b316-6d8d8833ae72.json | 2 +- .../c49077d9-90b3-437f-b316-6d8d8833ae73.json | 11 ----------- .../c49077d9-90b3-437f-b316-6d8d8833ae75.json | 11 ----------- .../c49077d9-90b3-437f-b316-6d8d8833ae76.json | 4 ++-- .../c49077d9-90b3-437f-b316-6d8d8833ae77.json | 6 +++--- .../f8a7b6c5-d4e3-2f1a-0b9c-8d7e6f5a4b3c.json | 2 +- 24 files changed, 28 insertions(+), 116 deletions(-) delete mode 100644 generator/.DevConfigs/252dad9f-d2a9-4d49-bff8-000924f0add4.json delete mode 100644 generator/.DevConfigs/433a9a6d-b8ea-4676-b763-70711e8288e4.json delete mode 100644 generator/.DevConfigs/433a9a6d-b8ea-4676-b763-70711e8288e6.json delete mode 100644 generator/.DevConfigs/7f23582e-3225-487b-83e7-167cf17cb231.json delete mode 100644 generator/.DevConfigs/7f23582e-3225-487b-83e7-167cf17cb238.json delete mode 100644 generator/.DevConfigs/984a2bde-687f-4ed1-b6eb-03f15b257416.json delete mode 100644 generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae73.json delete mode 100644 generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae75.json diff --git a/generator/.DevConfigs/19ed68ce-9f46-4e1e-a0ff-45a2b3641946.json b/generator/.DevConfigs/19ed68ce-9f46-4e1e-a0ff-45a2b3641946.json index 4a4e87779a51..28cd581df6e4 100644 --- a/generator/.DevConfigs/19ed68ce-9f46-4e1e-a0ff-45a2b3641946.json +++ b/generator/.DevConfigs/19ed68ce-9f46-4e1e-a0ff-45a2b3641946.json @@ -4,9 +4,9 @@ "serviceName": "S3", "type": "patch", "changeLogMessages": [ - "Added MaxInMemoryParts property to TransferUtilityOpenStreamRequest for controlling memory usage during multipart downloads", - "Added PartSize property to BaseDownloadRequest for configuring multipart download part sizes", - "Added MultipartDownloadType enum and property to BaseDownloadRequest for selecting download strategy" + "Added MaxInMemoryParts property to the Amazon.S3.Transfer.TransferUtilityOpenStreamRequest class for controlling memory usage during multipart downloads", + "Added PartSize property to the Amazon.S3.Transfer.BaseDownloadRequest class for configuring multipart download part sizes", + "Added MultipartDownloadType enum and property to the Amazon.S3.Transfer.BaseDownloadRequest class for selecting download strategy" ] } ] diff --git a/generator/.DevConfigs/252dad9f-d2a9-4d49-bff8-000924f0add4.json 
b/generator/.DevConfigs/252dad9f-d2a9-4d49-bff8-000924f0add4.json deleted file mode 100644 index 3c1fff65ffab..000000000000 --- a/generator/.DevConfigs/252dad9f-d2a9-4d49-bff8-000924f0add4.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "services": [ - { - "serviceName": "S3", - "type": "minor", - "changeLogMessages": [ - "Add GetObjectResponse to TransferUtilityDownloadResponse mapping." - ] - } - ] -} \ No newline at end of file diff --git a/generator/.DevConfigs/433a9a6d-b8ea-4676-b763-70711e8288e2.json b/generator/.DevConfigs/433a9a6d-b8ea-4676-b763-70711e8288e2.json index e99cbe1c4bc1..1c6a07f688f3 100644 --- a/generator/.DevConfigs/433a9a6d-b8ea-4676-b763-70711e8288e2.json +++ b/generator/.DevConfigs/433a9a6d-b8ea-4676-b763-70711e8288e2.json @@ -4,8 +4,8 @@ "serviceName": "S3", "type": "minor", "changeLogMessages": [ - "Added UploadInitiatedEvent, UploadCompletedEvent, and UploadFailedEvent for non multipart uploads." + "Added UploadInitiatedEvent, UploadCompletedEvent, and UploadFailedEvent events to the Amazon.S3.Transfer.TransferUtility.Upload operation for non-multipart uploads." ] - } + } ] } diff --git a/generator/.DevConfigs/433a9a6d-b8ea-4676-b763-70711e8288e3.json b/generator/.DevConfigs/433a9a6d-b8ea-4676-b763-70711e8288e3.json index 1790a068cfae..e5c52a3bcb6e 100644 --- a/generator/.DevConfigs/433a9a6d-b8ea-4676-b763-70711e8288e3.json +++ b/generator/.DevConfigs/433a9a6d-b8ea-4676-b763-70711e8288e3.json @@ -4,7 +4,7 @@ "serviceName": "S3", "type": "minor", "changeLogMessages": [ - "Added UploadInitiatedEvent, UploadCompletedEvent, and UploadFailedEvent for multipart uploads." + "Added UploadInitiatedEvent, UploadCompletedEvent, and UploadFailedEvent events to the Amazon.S3.Transfer.TransferUtility.Upload operation for multipart uploads." ] } ] diff --git a/generator/.DevConfigs/433a9a6d-b8ea-4676-b763-70711e8288e4.json b/generator/.DevConfigs/433a9a6d-b8ea-4676-b763-70711e8288e4.json deleted file mode 100644 index 166d9469d903..000000000000 --- a/generator/.DevConfigs/433a9a6d-b8ea-4676-b763-70711e8288e4.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "services": [ - { - "serviceName": "S3", - "type": "patch", - "changeLogMessages": [ - "Added PutObjectResponse to TransferUtilityUploadResponse mapping" - ] - } - ] -} diff --git a/generator/.DevConfigs/433a9a6d-b8ea-4676-b763-70711e8288e6.json b/generator/.DevConfigs/433a9a6d-b8ea-4676-b763-70711e8288e6.json deleted file mode 100644 index 5d67e3a8b858..000000000000 --- a/generator/.DevConfigs/433a9a6d-b8ea-4676-b763-70711e8288e6.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "services": [ - { - "serviceName": "S3", - "type": "patch", - "changeLogMessages": [ - "Added CompleteMultipartUploadResponse to TransferUtilityUploadResponse mapping" - ] - } - ] -} diff --git a/generator/.DevConfigs/55fe9e14-c79e-4426-9828-deae0451d4f6.json b/generator/.DevConfigs/55fe9e14-c79e-4426-9828-deae0451d4f6.json index 0416619a5fb5..4fb704cc474c 100644 --- a/generator/.DevConfigs/55fe9e14-c79e-4426-9828-deae0451d4f6.json +++ b/generator/.DevConfigs/55fe9e14-c79e-4426-9828-deae0451d4f6.json @@ -4,7 +4,7 @@ "serviceName": "S3", "type": "minor", "changeLogMessages": [ - "Created new DownloadDirectoryWithResponseAsync methods on the Amazon.S3.Transfer.TransferUtility class. The new operations support downloading directories using multipart download for files and return response metadata." + "Added DownloadDirectoryWithResponse methods to the Amazon.S3.Transfer.ITransferUtility interface. 
The new operations support downloading directories using multipart download for files and return response metadata." ] } ] diff --git a/generator/.DevConfigs/77d980ad-8f58-4f2e-97f8-d2c8c5ba3732.json b/generator/.DevConfigs/77d980ad-8f58-4f2e-97f8-d2c8c5ba3732.json index be509aae4368..1ca9ea6953b9 100644 --- a/generator/.DevConfigs/77d980ad-8f58-4f2e-97f8-d2c8c5ba3732.json +++ b/generator/.DevConfigs/77d980ad-8f58-4f2e-97f8-d2c8c5ba3732.json @@ -4,7 +4,7 @@ "serviceName": "S3", "type": "minor", "changeLogMessages": [ - "Create new UploadWithResponse API that returns response metadata information for transfer utility." + "Created new UploadWithResponse methods on the Amazon.S3.Transfer.TransferUtility class that return response metadata information." ] } ] diff --git a/generator/.DevConfigs/7f23582e-3225-487b-83e7-167cf17cb231.json b/generator/.DevConfigs/7f23582e-3225-487b-83e7-167cf17cb231.json deleted file mode 100644 index 564bb1cd65d5..000000000000 --- a/generator/.DevConfigs/7f23582e-3225-487b-83e7-167cf17cb231.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "services": [ - { - "serviceName": "S3", - "type": "patch", - "changeLogMessages": [ - "Add GetObjectResponse to TransferUtilityDownloadResponse mapping." - ] - } - ] -} diff --git a/generator/.DevConfigs/7f23582e-3225-487b-83e7-167cf17cb234.json b/generator/.DevConfigs/7f23582e-3225-487b-83e7-167cf17cb234.json index 179605fd4aab..9da2db586cf2 100644 --- a/generator/.DevConfigs/7f23582e-3225-487b-83e7-167cf17cb234.json +++ b/generator/.DevConfigs/7f23582e-3225-487b-83e7-167cf17cb234.json @@ -4,8 +4,8 @@ "serviceName": "S3", "type": "minor", "changeLogMessages": [ - "Added DownloadDirectoryInitiatedEvent, DownloadDirectoryCompletedEvent, and DownloadDirectoryFailedEvent for Amazon.S3.Transfer.TransferUtility.DownloadDirectory." + "Added DownloadDirectoryInitiatedEvent, DownloadDirectoryCompletedEvent, and DownloadDirectoryFailedEvent events to the Amazon.S3.Transfer.ITransferUtility.DownloadDirectory operation." ] } ] -} \ No newline at end of file +} diff --git a/generator/.DevConfigs/7f23582e-3225-487b-83e7-167cf17cb238.json b/generator/.DevConfigs/7f23582e-3225-487b-83e7-167cf17cb238.json deleted file mode 100644 index 04d1122899c8..000000000000 --- a/generator/.DevConfigs/7f23582e-3225-487b-83e7-167cf17cb238.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "services": [ - { - "serviceName": "S3", - "type": "patch", - "changeLogMessages": [ - "Update Response mapping logic for PutObjectResponse and CompleteMultipartResponse" - ] - } - ] -} diff --git a/generator/.DevConfigs/984a2bde-687f-4ed1-b6eb-03f15b257416.json b/generator/.DevConfigs/984a2bde-687f-4ed1-b6eb-03f15b257416.json deleted file mode 100644 index a1fb17cb3107..000000000000 --- a/generator/.DevConfigs/984a2bde-687f-4ed1-b6eb-03f15b257416.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "services": [ - { - "serviceName": "S3", - "type": "patch", - "changeLogMessages": [ - "Added progress tracking events to multipart download operations" - ] - } - ] -} diff --git a/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872042.json b/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872042.json index 92a91ce271dd..c8bf4811b2fc 100644 --- a/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872042.json +++ b/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872042.json @@ -4,8 +4,8 @@ "serviceName": "S3", "type": "minor", "changeLogMessages": [ - "Created new OpenStreamWithResponseAsync method on the Amazon.S3.Transfer.TransferUtility class. 
The new operation supports downloading in parallel parts of the S3 object in the background while reading from the stream for improved performance." + "Added OpenStreamWithResponse method to the Amazon.S3.Transfer.ITransferUtility interface. The new operation supports downloading in parallel parts of the S3 object in the background while reading from the stream for improved performance." ] } ] -} \ No newline at end of file +} diff --git a/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872043.json b/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872043.json index 6793c5b842ce..150ad4a52c6a 100644 --- a/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872043.json +++ b/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872043.json @@ -4,8 +4,8 @@ "serviceName": "S3", "type": "minor", "changeLogMessages": [ - "Created new DownloadWithResponseAsync method on the Amazon.S3.Transfer.TransferUtility class. The new operation supports downloading in parallel parts of the S3 object to a file for improved performance." + "Added DownloadWithResponse method to the Amazon.S3.Transfer.ITransferUtility interface. The new operation supports downloading in parallel parts of the S3 object to a file for improved performance." ] } ] -} \ No newline at end of file +} diff --git a/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872044.json b/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872044.json index b2cafff31230..118b7b6e48c1 100644 --- a/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872044.json +++ b/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872044.json @@ -4,8 +4,8 @@ "serviceName": "S3", "type": "minor", "changeLogMessages": [ - "Created new UploadDirectoryWithResponseAsync method on the Amazon.S3.Transfer.TransferUtility class." + "Added UploadDirectoryWithResponse method to the Amazon.S3.Transfer.ITransferUtility interface." ] } ] -} \ No newline at end of file +} diff --git a/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872123.json b/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872123.json index 1838e718b587..62482b15f9c8 100644 --- a/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872123.json +++ b/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872123.json @@ -4,8 +4,8 @@ "serviceName": "S3", "type": "minor", "changeLogMessages": [ - "Added DownloadInitiatedEvent, DownloadCompletedEvent, and DownloadFailedEvent for TransferUtility Download." + "Added DownloadInitiatedEvent, DownloadCompletedEvent, and DownloadFailedEvent events to the Amazon.S3.Transfer.TransferUtility.Download operation." ] } ] -} \ No newline at end of file +} diff --git a/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872124.json b/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872124.json index d5508da3272f..540544edfe00 100644 --- a/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872124.json +++ b/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872124.json @@ -4,8 +4,8 @@ "serviceName": "S3", "type": "minor", "changeLogMessages": [ - "Added UploadDirectoryInitiatedEvent, UploadDirectoryCompletedEvent, and UploadDirectoryFailedEvent for Amazon.S3.Transfer.TransferUtility.UploadDirectory." + "Added UploadDirectoryInitiatedEvent, UploadDirectoryCompletedEvent, and UploadDirectoryFailedEvent events to the Amazon.S3.Transfer.ITransferUtility.UploadDirectory operation." 
] } ] -} \ No newline at end of file +} diff --git a/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae65.json b/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae65.json index 1e2e348d0f87..ccd3be4c1bf2 100644 --- a/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae65.json +++ b/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae65.json @@ -4,7 +4,7 @@ "serviceName": "S3", "type": "patch", "changeLogMessages": [ - "Fix Transfer Utility internal Logger recursive property definition" + "Fixed recursive property definition in the Amazon.S3.Transfer.TransferUtility internal Logger implementation" ] } ] diff --git a/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae72.json b/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae72.json index ee368d5cf126..21832b0bc135 100644 --- a/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae72.json +++ b/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae72.json @@ -4,7 +4,7 @@ "serviceName": "S3", "type": "patch", "changeLogMessages": [ - "Add ContentLanguage to header collection of GetObjectResponse." + "Added ContentLanguage property to the header collection of the Amazon.S3.Model.GetObjectResponse class." ] } ] diff --git a/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae73.json b/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae73.json deleted file mode 100644 index 3a1b9218e539..000000000000 --- a/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae73.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "services": [ - { - "serviceName": "S3", - "type": "patch", - "changeLogMessages": [ - "Populate TransferUtilityDownloadDirectoryResponse with total objects downloaded" - ] - } - ] -} diff --git a/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae75.json b/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae75.json deleted file mode 100644 index 999c11e35b3e..000000000000 --- a/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae75.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "services": [ - { - "serviceName": "S3", - "type": "patch", - "changeLogMessages": [ - "Remove AmazonWebServiceResponse as base class for transfer utility response objects." - ] - } - ] -} diff --git a/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae76.json b/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae76.json index ac2ef799e36d..1b73fcde33c5 100644 --- a/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae76.json +++ b/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae76.json @@ -4,8 +4,8 @@ "serviceName": "S3", "type": "minor", "changeLogMessages": [ - "Add FailurePolicy property to TransferUtilityDownloadDirectoryRequest to allow configuration of failure handling behavior during directory downloads. The default behavior is set to abort on failure. Users can now choose to either abort the entire operation or continue downloading remaining files when a failure occurs.", - "Add ObjectDownloadFailedEvent event to TransferUtilityDownloadDirectory to notify users when an individual file download fails during a directory download operation. This event provides details about the failed download, including the original request, the specific file request and the exception encountered." + "Added FailurePolicy property to the Amazon.S3.Transfer.TransferUtilityDownloadDirectoryRequest class to allow configuration of failure handling behavior during directory downloads. The default behavior is set to abort on failure. 
Users can now choose to either abort the entire operation or continue downloading remaining files when a failure occurs.", + "Added ObjectDownloadFailedEvent event to the Amazon.S3.Transfer.TransferUtility.DownloadDirectory operation to notify users when an individual file download fails during a directory download operation. This event provides details about the failed download, including the original request, the specific file request and the exception encountered." ] } ] diff --git a/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae77.json b/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae77.json index e75662d5e8d0..8539da002f0a 100644 --- a/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae77.json +++ b/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae77.json @@ -4,9 +4,9 @@ "serviceName": "S3", "type": "minor", "changeLogMessages": [ - "Add FailurePolicy property to TransferUtilityUploadDirectoryRequest to allow configuration of failure handling behavior during directory uploads. The default behavior is set to abort on failure. Users can now choose to either abort the entire operation or continue uploading remaining files when a failure occurs.", - "Add ObjectUploadFailedEvent event to TransferUtilityUploadDirectoryRequest to notify users when an individual file upload fails during a directory upload operation. This event provides details about the failed upload, including the original request, the specific file request and the exception encountered." + "Added FailurePolicy property to the Amazon.S3.Transfer.TransferUtilityUploadDirectoryRequest class to allow configuration of failure handling behavior during directory uploads. The default behavior is set to abort on failure. Users can now choose to either abort the entire operation or continue uploading remaining files when a failure occurs.", + "Added ObjectUploadFailedEvent event to the Amazon.S3.Transfer.TransferUtility.UploadDirectory operation to notify users when an individual file upload fails during a directory upload operation. This event provides details about the failed upload, including the original request, the specific file request and the exception encountered." 
] } ] -} \ No newline at end of file +} diff --git a/generator/.DevConfigs/f8a7b6c5-d4e3-2f1a-0b9c-8d7e6f5a4b3c.json b/generator/.DevConfigs/f8a7b6c5-d4e3-2f1a-0b9c-8d7e6f5a4b3c.json index bc5e6350ecb8..ba09978119be 100644 --- a/generator/.DevConfigs/f8a7b6c5-d4e3-2f1a-0b9c-8d7e6f5a4b3c.json +++ b/generator/.DevConfigs/f8a7b6c5-d4e3-2f1a-0b9c-8d7e6f5a4b3c.json @@ -4,7 +4,7 @@ "serviceName": "S3", "type": "patch", "changeLogMessages": [ - "Added UploadWithResponse and UploadWithResponseAsync methods to ITransferUtility interface" + "Added UploadWithResponse and UploadWithResponseAsync methods to the Amazon.S3.Transfer.ITransferUtility interface" ] } ] From 9647310eff5c2691ecbcf46eaa7a586b4b87d97f Mon Sep 17 00:00:00 2001 From: Garrett Beatty Date: Fri, 12 Dec 2025 10:54:34 -0500 Subject: [PATCH 53/56] Doc updates (#4229) --- ...TransferUtilityDownloadDirectoryRequest.cs | 1 + .../Transfer/_async/ITransferUtility.async.cs | 86 ++++++++++++++++++- .../_bcl+netstandard/ITransferUtility.sync.cs | 86 ++++++++++++++++++- 3 files changed, 169 insertions(+), 4 deletions(-) diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryRequest.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryRequest.cs index c36b8e03cb3d..7185e0d0cb22 100644 --- a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryRequest.cs +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryRequest.cs @@ -318,6 +318,7 @@ internal bool IsSetUnmodifiedSinceDate() /// Specifies if multiple files will be downloaded concurrently. /// The number of concurrent web requests used is controlled /// by the TransferUtilityConfig.ConcurrencyLevel property. + /// The default value is false. /// #if BCL || NETSTANDARD public diff --git a/sdk/src/Services/S3/Custom/Transfer/_async/ITransferUtility.async.cs b/sdk/src/Services/S3/Custom/Transfer/_async/ITransferUtility.async.cs index ac041a5e4956..2cf68c4d14d4 100644 --- a/sdk/src/Services/S3/Custom/Transfer/_async/ITransferUtility.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/_async/ITransferUtility.async.cs @@ -343,6 +343,26 @@ public partial interface ITransferUtility : IDisposable /// Downloaded parts are written directly to the file as they arrive /// /// + /// Multipart Download Strategy: + /// + /// + /// The property controls how parts are downloaded (default: MultipartDownloadType.PART): + /// + /// + /// PART (default): Uses the original part boundaries from when the object was uploaded with multipart upload. + /// This is more efficient as it aligns with S3's internal part structure, but requires that the object was uploaded using multipart upload. + /// The property is ignored in this mode. + /// RANGE: Uses range-based downloads with configurable part sizes via the property. + /// This works with any object (whether uploaded as single-part or multipart) and provides more flexibility in controlling download part sizes. + /// + /// + /// When to use PART vs RANGE: + /// + /// + /// Use PART mode (default) when you know the object was uploaded using multipart upload and want optimal performance. + /// Use RANGE mode when the object's upload method is unknown, when you need specific part sizes, or when downloading objects that were uploaded as a single part. 
+ /// + /// /// Configuration Options: /// /// @@ -393,6 +413,26 @@ public partial interface ITransferUtility : IDisposable /// Downloaded parts are written directly to the file as they arrive /// /// + /// Multipart Download Strategy: + /// + /// + /// The property controls how parts are downloaded (default: MultipartDownloadType.PART): + /// + /// + /// PART (default): Uses the original part boundaries from when the object was uploaded with multipart upload. + /// This is more efficient as it aligns with S3's internal part structure, but requires that the object was uploaded using multipart upload. + /// The property is ignored in this mode. + /// RANGE: Uses range-based downloads with configurable part sizes via the property. + /// This works with any object (whether uploaded as single-part or multipart) and provides more flexibility in controlling download part sizes. + /// + /// + /// When to use PART vs RANGE: + /// + /// + /// Use PART mode (default) when you know the object was uploaded using multipart upload and want optimal performance. + /// Use RANGE mode when the object's upload method is unknown, when you need specific part sizes, or when downloading objects that were uploaded as a single part. + /// + /// /// Configuration Options: /// /// @@ -418,7 +458,8 @@ public partial interface ITransferUtility : IDisposable /// BucketName = "my-bucket", /// Key = "my-key", /// FilePath = "local-file.txt", - /// PartSize = 16 * 1024 * 1024 // Use 16MB parts instead of default 8MB + /// PartSize = 16 * 1024 * 1024, // Use 16MB parts instead of default 8MB + /// MultipartDownloadType = MultipartDownloadType.RANGE // Enable RANGE mode to use custom PartSize /// }; /// var response = await transferUtility.DownloadWithResponseAsync(request); /// @@ -534,6 +575,26 @@ public partial interface ITransferUtility : IDisposable /// Downloaded parts are buffered in memory and served to your application as you read from the stream /// /// + /// Multipart Download Strategy: + /// + /// + /// The property controls how parts are downloaded (default: MultipartDownloadType.PART): + /// + /// + /// PART (default): Uses the original part boundaries from when the object was uploaded with multipart upload. + /// This is more efficient as it aligns with S3's internal part structure, but requires that the object was uploaded using multipart upload. + /// The property is ignored in this mode. + /// RANGE: Uses range-based downloads with configurable part sizes via the property. + /// This works with any object (whether uploaded as single-part or multipart) and provides more flexibility in controlling download part sizes. + /// + /// + /// When to use PART vs RANGE: + /// + /// + /// Use PART mode (default) when you know the object was uploaded using multipart upload and want optimal performance. + /// Use RANGE mode when the object's upload method is unknown, when you need specific part sizes, or when downloading objects that were uploaded as a single part. + /// + /// /// Configuration Options: /// /// @@ -589,6 +650,26 @@ public partial interface ITransferUtility : IDisposable /// Downloaded parts are buffered in memory and served to your application as you read from the stream /// /// + /// Multipart Download Strategy: + /// + /// + /// The property controls how parts are downloaded (default: MultipartDownloadType.PART): + /// + /// + /// PART (default): Uses the original part boundaries from when the object was uploaded with multipart upload. 
+ /// This is more efficient as it aligns with S3's internal part structure, but requires that the object was uploaded using multipart upload. + /// The property is ignored in this mode. + /// RANGE: Uses range-based downloads with configurable part sizes via the property. + /// This works with any object (whether uploaded as single-part or multipart) and provides more flexibility in controlling download part sizes. + /// + /// + /// When to use PART vs RANGE: + /// + /// + /// Use PART mode (default) when you know the object was uploaded using multipart upload and want optimal performance. + /// Use RANGE mode when the object's upload method is unknown, when you need specific part sizes, or when downloading objects that were uploaded as a single part. + /// + /// /// Configuration Options: /// /// @@ -615,7 +696,8 @@ public partial interface ITransferUtility : IDisposable /// { /// BucketName = "my-bucket", /// Key = "my-key", - /// PartSize = 16 * 1024 * 1024 // Use 16MB parts instead of default 8MB + /// PartSize = 16 * 1024 * 1024, // Use 16MB parts instead of default 8MB + /// MultipartDownloadType = MultipartDownloadType.RANGE // Enable RANGE mode to use custom PartSize /// }; /// var response = await transferUtility.OpenStreamWithResponseAsync(request); /// diff --git a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.sync.cs b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.sync.cs index b984fa70b800..6f21007f148b 100644 --- a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.sync.cs +++ b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.sync.cs @@ -360,6 +360,26 @@ public partial interface ITransferUtility /// Downloaded parts are buffered in memory and served to your application as you read from the stream /// /// + /// Multipart Download Strategy: + /// + /// + /// The property controls how parts are downloaded (default: MultipartDownloadType.PART): + /// + /// + /// PART (default): Uses the original part boundaries from when the object was uploaded with multipart upload. + /// This is more efficient as it aligns with S3's internal part structure, but requires that the object was uploaded using multipart upload. + /// The property is ignored in this mode. + /// RANGE: Uses range-based downloads with configurable part sizes via the property. + /// This works with any object (whether uploaded as single-part or multipart) and provides more flexibility in controlling download part sizes. + /// + /// + /// When to use PART vs RANGE: + /// + /// + /// Use PART mode (default) when you know the object was uploaded using multipart upload and want optimal performance. + /// Use RANGE mode when the object's upload method is unknown, when you need specific part sizes, or when downloading objects that were uploaded as a single part. + /// + /// /// Configuration Options: /// /// @@ -414,6 +434,26 @@ public partial interface ITransferUtility /// Downloaded parts are buffered in memory and served to your application as you read from the stream /// /// + /// Multipart Download Strategy: + /// + /// + /// The property controls how parts are downloaded (default: MultipartDownloadType.PART): + /// + /// + /// PART (default): Uses the original part boundaries from when the object was uploaded with multipart upload. + /// This is more efficient as it aligns with S3's internal part structure, but requires that the object was uploaded using multipart upload. + /// The property is ignored in this mode. 
+ /// RANGE: Uses range-based downloads with configurable part sizes via the property. + /// This works with any object (whether uploaded as single-part or multipart) and provides more flexibility in controlling download part sizes. + /// + /// + /// When to use PART vs RANGE: + /// + /// + /// Use PART mode (default) when you know the object was uploaded using multipart upload and want optimal performance. + /// Use RANGE mode when the object's upload method is unknown, when you need specific part sizes, or when downloading objects that were uploaded as a single part. + /// + /// /// Configuration Options: /// /// @@ -440,7 +480,8 @@ public partial interface ITransferUtility /// { /// BucketName = "my-bucket", /// Key = "my-key", - /// PartSize = 16 * 1024 * 1024 // Use 16MB parts instead of default 8MB + /// PartSize = 16 * 1024 * 1024, // Use 16MB parts instead of default 8MB + /// MultipartDownloadType = MultipartDownloadType.RANGE // Enable RANGE mode to use custom PartSize /// }; /// var response = transferUtility.OpenStreamWithResponse(request); /// @@ -516,6 +557,26 @@ public partial interface ITransferUtility /// Downloaded parts are written directly to the file as they arrive /// /// + /// Multipart Download Strategy: + /// + /// + /// The property controls how parts are downloaded (default: MultipartDownloadType.PART): + /// + /// + /// PART (default): Uses the original part boundaries from when the object was uploaded with multipart upload. + /// This is more efficient as it aligns with S3's internal part structure, but requires that the object was uploaded using multipart upload. + /// The property is ignored in this mode. + /// RANGE: Uses range-based downloads with configurable part sizes via the property. + /// This works with any object (whether uploaded as single-part or multipart) and provides more flexibility in controlling download part sizes. + /// + /// + /// When to use PART vs RANGE: + /// + /// + /// Use PART mode (default) when you know the object was uploaded using multipart upload and want optimal performance. + /// Use RANGE mode when the object's upload method is unknown, when you need specific part sizes, or when downloading objects that were uploaded as a single part. + /// + /// /// Configuration Options: /// /// @@ -563,6 +624,26 @@ public partial interface ITransferUtility /// Downloaded parts are written directly to the file as they arrive /// /// + /// Multipart Download Strategy: + /// + /// + /// The property controls how parts are downloaded (default: MultipartDownloadType.PART): + /// + /// + /// PART (default): Uses the original part boundaries from when the object was uploaded with multipart upload. + /// This is more efficient as it aligns with S3's internal part structure, but requires that the object was uploaded using multipart upload. + /// The property is ignored in this mode. + /// RANGE: Uses range-based downloads with configurable part sizes via the property. + /// This works with any object (whether uploaded as single-part or multipart) and provides more flexibility in controlling download part sizes. + /// + /// + /// When to use PART vs RANGE: + /// + /// + /// Use PART mode (default) when you know the object was uploaded using multipart upload and want optimal performance. + /// Use RANGE mode when the object's upload method is unknown, when you need specific part sizes, or when downloading objects that were uploaded as a single part. 
+ /// + /// /// Configuration Options: /// /// @@ -588,7 +669,8 @@ public partial interface ITransferUtility /// BucketName = "my-bucket", /// Key = "my-key", /// FilePath = "local-file.txt", - /// PartSize = 16 * 1024 * 1024 // Use 16MB parts instead of default 8MB + /// PartSize = 16 * 1024 * 1024, // Use 16MB parts instead of default 8MB + /// MultipartDownloadType = MultipartDownloadType.RANGE // Enable RANGE mode to use custom PartSize /// }; /// var response = transferUtility.DownloadWithResponse(request); /// From 77c79725822605cad06f4b1607001ad1031d2c4e Mon Sep 17 00:00:00 2001 From: Garrett Beatty Date: Fri, 12 Dec 2025 13:54:10 -0500 Subject: [PATCH 54/56] refactor (#4233) --- .../Internal/MultipartDownloadManager.cs | 340 ++++++++++-------- 1 file changed, 183 insertions(+), 157 deletions(-) diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs index 1537d48c58da..ecdcde369441 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs @@ -306,7 +306,6 @@ public async Task StartDownloadsAsync(DownloadDiscoveryResult discoveryResult, E _logger.DebugFormat("MultipartDownloadManager: Starting downloads - TotalParts={0}, IsSinglePart={1}", discoveryResult.TotalParts, discoveryResult.IsSinglePart); - var downloadTasks = new List(); var internalCts = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken); try @@ -316,39 +315,8 @@ public async Task StartDownloadsAsync(DownloadDiscoveryResult discoveryResult, E ? new EventHandler(DownloadPartProgressEventCallback) : null; - try - { - // Prepare the data handler (e.g., create temp files for file-based downloads) - await _dataHandler.PrepareAsync(discoveryResult, cancellationToken).ConfigureAwait(false); - - // Attach progress callback to Part 1's response if provided - if (wrappedCallback != null) - { - discoveryResult.InitialResponse.WriteObjectProgressEvent += wrappedCallback; - } - - // Process Part 1 from InitialResponse (applies to both single-part and multipart) - // NOTE: Semaphore is still held from discovery phase and will be released in finally block - _logger.DebugFormat("MultipartDownloadManager: Processing Part 1 from discovery response"); - await _dataHandler.ProcessPartAsync(1, discoveryResult.InitialResponse, cancellationToken).ConfigureAwait(false); - - _logger.DebugFormat("MultipartDownloadManager: Part 1 processing completed"); - } - finally - { - // Always detach the event handler to prevent memory leak - if (wrappedCallback != null) - { - discoveryResult.InitialResponse.WriteObjectProgressEvent -= wrappedCallback; - } - - // Release semaphore after BOTH network download AND disk write complete for Part 1 - // This ensures ConcurrentServiceRequests controls the entire I/O operation, - // consistent with Parts 2+ (see CreateDownloadTaskAsync) - _httpConcurrencySlots.Release(); - _logger.DebugFormat("MultipartDownloadManager: [Part 1] HTTP concurrency slot released (Available: {0}/{1})", - _httpConcurrencySlots.CurrentCount, _config.ConcurrentServiceRequests); - } + // Process Part 1 (downloaded during discovery) + await ProcessFirstPartAsync(discoveryResult, wrappedCallback, cancellationToken).ConfigureAwait(false); if (discoveryResult.IsSinglePart) { @@ -361,116 +329,12 @@ public async Task StartDownloadsAsync(DownloadDiscoveryResult discoveryResult, E // Check if already cancelled before creating 
background task cancellationToken.ThrowIfCancellationRequested(); - // Start background task to handle capacity acquisition and task creation + // Start background task to handle remaining parts // This allows the method to return immediately so the consumer can start reading // which prevents deadlock when MaxInMemoryParts is reached before consumer begins reading _downloadCompletionTask = Task.Run(async () => { - try - { - _logger.DebugFormat("MultipartDownloadManager: Background task starting capacity acquisition and downloads"); - - // Multipart: Start concurrent downloads for remaining parts (Part 2 onwards) - _logger.InfoFormat("MultipartDownloadManager: Starting concurrent downloads for parts 2-{0}", - discoveryResult.TotalParts); - - // Pre-acquire capacity in sequential order to prevent race condition deadlock - // This ensures Part 2 gets capacity before Part 3, etc., preventing out-of-order - // parts from consuming all buffer slots and blocking the next expected part - for (int partNum = 2; partNum <= discoveryResult.TotalParts; partNum++) - { - _logger.DebugFormat("MultipartDownloadManager: [Part {0}] Waiting for buffer space", partNum); - - // Acquire capacity sequentially - guarantees Part 2 before Part 3, etc. - await _dataHandler.WaitForCapacityAsync(internalCts.Token).ConfigureAwait(false); - - _logger.DebugFormat("MultipartDownloadManager: [Part {0}] Buffer space acquired", partNum); - - _logger.DebugFormat("MultipartDownloadManager: [Part {0}] Waiting for HTTP concurrency slot (Available: {1}/{2})", - partNum, _httpConcurrencySlots.CurrentCount, _config.ConcurrentServiceRequests); - - // Acquire HTTP slot in the loop before creating task - // Loop will block here if all slots are in use - await _httpConcurrencySlots.WaitAsync(internalCts.Token).ConfigureAwait(false); - - _logger.DebugFormat("MultipartDownloadManager: [Part {0}] HTTP concurrency slot acquired", partNum); - - try - { - var task = CreateDownloadTaskAsync(partNum, discoveryResult.ObjectSize, wrappedCallback, internalCts.Token); - downloadTasks.Add(task); - } - catch (Exception ex) - { - // If task creation fails, release the HTTP slot we just acquired - _httpConcurrencySlots.Release(); - _logger.DebugFormat("MultipartDownloadManager: [Part {0}] HTTP concurrency slot released due to task creation failure: {1}", partNum, ex); - throw; - } - } - - var expectedTaskCount = downloadTasks.Count; - _logger.DebugFormat("MultipartDownloadManager: Background task waiting for {0} download tasks", expectedTaskCount); - - // Wait for all downloads to complete (fails fast on first exception) - await TaskHelpers.WhenAllOrFirstExceptionAsync(downloadTasks, internalCts.Token).ConfigureAwait(false); - - _logger.DebugFormat("MultipartDownloadManager: All download tasks completed successfully"); - - // SEP Part GET Step 6 / Ranged GET Step 8: - // "validate that the total number of part GET requests sent matches with the expected PartsCount" - // Note: This should always be true if we reach this point, since WhenAllOrFirstException - // ensures all tasks completed successfully (or threw on first failure). - // The check serves as a defensive assertion for SEP compliance. - // Note: expectedTaskCount + 1 accounts for Part 1 being buffered during discovery - if (expectedTaskCount + 1 != discoveryResult.TotalParts) - { - throw new InvalidOperationException( - $"Request count mismatch. 
Expected {discoveryResult.TotalParts} parts, " + - $"but sent {expectedTaskCount + 1} requests"); - } - - // Mark successful completion - _logger.InfoFormat("MultipartDownloadManager: Download completed successfully - TotalParts={0}", - discoveryResult.TotalParts); - _dataHandler.OnDownloadComplete(null); - } - #pragma warning disable CA1031 // Do not catch general exception types - - catch (Exception ex) - { - _downloadException = ex; - - - - // Cancel all remaining downloads immediately to prevent cascading timeout errors - // This ensures that when one part fails, other tasks stop gracefully instead of - // continuing until they hit their own timeout/cancellation errors - // Check if cancellation was already requested to avoid ObjectDisposedException - if (!internalCts.IsCancellationRequested) - { - try - { - internalCts.Cancel(); - _logger.DebugFormat("MultipartDownloadManager: Cancelled all in-flight downloads due to error"); - } - catch (ObjectDisposedException) - { - // CancellationTokenSource was already disposed, ignore - _logger.DebugFormat("MultipartDownloadManager: CancellationTokenSource already disposed during cancellation"); - } - } - - _dataHandler.OnDownloadComplete(ex); - throw; - } - #pragma warning restore CA1031 // Do not catch general exception types - finally - { - // Dispose the CancellationTokenSource after all background operations complete - // This ensures the token remains valid for the entire lifetime of download tasks - internalCts.Dispose(); - } + await StartBackgroundDownloadsAsync(discoveryResult, wrappedCallback, internalCts).ConfigureAwait(false); }, cancellationToken); // Return immediately to allow consumer to start reading @@ -482,23 +346,7 @@ public async Task StartDownloadsAsync(DownloadDiscoveryResult discoveryResult, E _downloadException = ex; _logger.Error(ex, "MultipartDownloadManager: Download failed"); - // Cancel all remaining downloads immediately to prevent cascading timeout errors - // Check if cancellation was already requested to avoid ObjectDisposedException - if (!internalCts.IsCancellationRequested) - { - try - { - internalCts.Cancel(); - _logger.DebugFormat("MultipartDownloadManager: Cancelled all in-flight downloads due to error"); - } - catch (ObjectDisposedException) - { - // CancellationTokenSource was already disposed, ignore - _logger.DebugFormat("MultipartDownloadManager: CancellationTokenSource already disposed during cancellation"); - } - } - - _dataHandler.OnDownloadComplete(ex); + HandleDownloadError(ex, internalCts); // Dispose the CancellationTokenSource if background task was never started // This handles the case where an error occurs before Task.Run is called @@ -510,6 +358,184 @@ public async Task StartDownloadsAsync(DownloadDiscoveryResult discoveryResult, E + /// + /// Processes Part 1 (downloaded during discovery) including preparation, progress tracking, and semaphore release. 
+ /// + private async Task ProcessFirstPartAsync(DownloadDiscoveryResult discoveryResult, EventHandler wrappedCallback, CancellationToken cancellationToken) + { + try + { + // Prepare the data handler (e.g., create temp files for file-based downloads) + await _dataHandler.PrepareAsync(discoveryResult, cancellationToken).ConfigureAwait(false); + + // Attach progress callback to Part 1's response if provided + if (wrappedCallback != null) + { + discoveryResult.InitialResponse.WriteObjectProgressEvent += wrappedCallback; + } + + // Process Part 1 from InitialResponse (applies to both single-part and multipart) + // NOTE: Semaphore is still held from discovery phase and will be released in finally block + _logger.DebugFormat("MultipartDownloadManager: Processing Part 1 from discovery response"); + await _dataHandler.ProcessPartAsync(1, discoveryResult.InitialResponse, cancellationToken).ConfigureAwait(false); + + _logger.DebugFormat("MultipartDownloadManager: Part 1 processing completed"); + } + finally + { + // Always detach the event handler to prevent memory leak + if (wrappedCallback != null) + { + discoveryResult.InitialResponse.WriteObjectProgressEvent -= wrappedCallback; + } + + // Release semaphore after BOTH network download AND disk write complete for Part 1 + // This ensures ConcurrentServiceRequests controls the entire I/O operation, + // consistent with Parts 2+ (see CreateDownloadTaskAsync) + _httpConcurrencySlots.Release(); + _logger.DebugFormat("MultipartDownloadManager: [Part 1] HTTP concurrency slot released (Available: {0}/{1})", + _httpConcurrencySlots.CurrentCount, _config.ConcurrentServiceRequests); + } + } + + /// + /// Starts background downloads for remaining parts (Part 2+) in a multipart download. + /// Handles capacity acquisition, task creation, completion validation, and error handling. 
+ /// + private async Task StartBackgroundDownloadsAsync(DownloadDiscoveryResult discoveryResult, EventHandler wrappedCallback, CancellationTokenSource internalCts) + { + var downloadTasks = new List(); + + try + { + _logger.DebugFormat("MultipartDownloadManager: Background task starting capacity acquisition and downloads"); + + // Multipart: Start concurrent downloads for remaining parts (Part 2 onwards) + _logger.InfoFormat("MultipartDownloadManager: Starting concurrent downloads for parts 2-{0}", + discoveryResult.TotalParts); + + // Create download tasks for all remaining parts + await CreateDownloadTasksAsync(discoveryResult, wrappedCallback, internalCts, downloadTasks).ConfigureAwait(false); + + var expectedTaskCount = downloadTasks.Count; + _logger.DebugFormat("MultipartDownloadManager: Background task waiting for {0} download tasks", expectedTaskCount); + + // Wait for all downloads to complete (fails fast on first exception) + await TaskHelpers.WhenAllOrFirstExceptionAsync(downloadTasks, internalCts.Token).ConfigureAwait(false); + + _logger.DebugFormat("MultipartDownloadManager: All download tasks completed successfully"); + + // Validate completion and mark successful + ValidateDownloadCompletion(expectedTaskCount, discoveryResult.TotalParts); + + // Mark successful completion + _logger.InfoFormat("MultipartDownloadManager: Download completed successfully - TotalParts={0}", + discoveryResult.TotalParts); + _dataHandler.OnDownloadComplete(null); + } + #pragma warning disable CA1031 // Do not catch general exception types + catch (Exception ex) + { + _downloadException = ex; + HandleDownloadError(ex, internalCts); + throw; + } + #pragma warning restore CA1031 // Do not catch general exception types + finally + { + // Dispose the CancellationTokenSource after all background operations complete + // This ensures the token remains valid for the entire lifetime of download tasks + internalCts.Dispose(); + } + } + + /// + /// Creates download tasks for all remaining parts (Part 2+) with sequential capacity acquisition. + /// Pre-acquires capacity in sequential order to prevent race condition deadlock. + /// + private async Task CreateDownloadTasksAsync(DownloadDiscoveryResult discoveryResult, EventHandler wrappedCallback, CancellationTokenSource internalCts, List downloadTasks) + { + // Pre-acquire capacity in sequential order to prevent race condition deadlock + // This ensures Part 2 gets capacity before Part 3, etc., preventing out-of-order + // parts from consuming all buffer slots and blocking the next expected part + for (int partNum = 2; partNum <= discoveryResult.TotalParts; partNum++) + { + _logger.DebugFormat("MultipartDownloadManager: [Part {0}] Waiting for buffer space", partNum); + + // Acquire capacity sequentially - guarantees Part 2 before Part 3, etc. 
+ await _dataHandler.WaitForCapacityAsync(internalCts.Token).ConfigureAwait(false); + + _logger.DebugFormat("MultipartDownloadManager: [Part {0}] Buffer space acquired", partNum); + + _logger.DebugFormat("MultipartDownloadManager: [Part {0}] Waiting for HTTP concurrency slot (Available: {1}/{2})", + partNum, _httpConcurrencySlots.CurrentCount, _config.ConcurrentServiceRequests); + + // Acquire HTTP slot in the loop before creating task + // Loop will block here if all slots are in use + await _httpConcurrencySlots.WaitAsync(internalCts.Token).ConfigureAwait(false); + + _logger.DebugFormat("MultipartDownloadManager: [Part {0}] HTTP concurrency slot acquired", partNum); + + try + { + var task = CreateDownloadTaskAsync(partNum, discoveryResult.ObjectSize, wrappedCallback, internalCts.Token); + downloadTasks.Add(task); + } + catch (Exception ex) + { + // If task creation fails, release the HTTP slot we just acquired + _httpConcurrencySlots.Release(); + _logger.DebugFormat("MultipartDownloadManager: [Part {0}] HTTP concurrency slot released due to task creation failure: {1}", partNum, ex); + throw; + } + } + } + + /// + /// Validates that the expected number of parts were downloaded for SEP compliance. + /// + private void ValidateDownloadCompletion(int expectedTaskCount, int totalParts) + { + // SEP Part GET Step 6 / Ranged GET Step 8: + // "validate that the total number of part GET requests sent matches with the expected PartsCount" + // Note: This should always be true if we reach this point, since WhenAllOrFirstException + // ensures all tasks completed successfully (or threw on first failure). + // The check serves as a defensive assertion for SEP compliance. + // Note: expectedTaskCount + 1 accounts for Part 1 being buffered during discovery + if (expectedTaskCount + 1 != totalParts) + { + throw new InvalidOperationException( + $"Request count mismatch. Expected {totalParts} parts, " + + $"but sent {expectedTaskCount + 1} requests"); + } + } + + /// + /// Handles download errors by cancelling remaining downloads and notifying the data handler. 
+ /// + private void HandleDownloadError(Exception ex, CancellationTokenSource internalCts) + { + // Cancel all remaining downloads immediately to prevent cascading timeout errors + // This ensures that when one part fails, other tasks stop gracefully instead of + // continuing until they hit their own timeout/cancellation errors + // Check if cancellation was already requested to avoid ObjectDisposedException + if (!internalCts.IsCancellationRequested) + { + try + { + internalCts.Cancel(); + _logger.DebugFormat("MultipartDownloadManager: Cancelled all in-flight downloads due to error"); + } + catch (ObjectDisposedException) + { + // CancellationTokenSource was already disposed, ignore + _logger.DebugFormat("MultipartDownloadManager: CancellationTokenSource already disposed during cancellation"); + } + } + + _dataHandler.OnDownloadComplete(ex); + } + private async Task CreateDownloadTaskAsync(int partNumber, long objectSize, EventHandler progressCallback, CancellationToken cancellationToken) { GetObjectResponse response = null; From 21ae4da6bd5e74a62e4d42a57826fc49a2aec062 Mon Sep 17 00:00:00 2001 From: Garrett Beatty Date: Tue, 16 Dec 2025 09:39:38 -0500 Subject: [PATCH 55/56] code refactor src (#4235) --- .../Internal/BufferedMultipartStream.cs | 18 +- .../Internal/BufferedPartDataHandler.cs | 2 +- .../Transfer/Internal/FilePartDataHandler.cs | 2 +- .../Transfer/Internal/IDownloadManager.cs | 41 +- .../Transfer/Internal/IPartDataHandler.cs | 2 +- .../Internal/MultipartDownloadManager.cs | 165 +++-- .../_async/MultipartDownloadCommand.async.cs | 30 +- .../Custom/BufferedMultipartStreamTests.cs | 85 +-- .../FilePartDataHandlerConcurrencyTests.cs | 4 +- .../Custom/FilePartDataHandlerTests.cs | 56 +- .../Custom/MultipartDownloadManagerTests.cs | 615 +++++++++--------- 11 files changed, 492 insertions(+), 528 deletions(-) diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedMultipartStream.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedMultipartStream.cs index 7093c2aa6a7b..fa3616a34f3d 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedMultipartStream.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedMultipartStream.cs @@ -43,16 +43,16 @@ internal class BufferedMultipartStream : Stream private bool _initialized = false; private bool _disposed = false; - private DownloadDiscoveryResult _discoveryResult; + private DownloadResult _discoveryResult; private long _totalBytesRead = 0; private readonly Logger _logger = Logger.GetLogger(typeof(BufferedMultipartStream)); /// - /// Gets the containing metadata from the initial GetObject response. + /// Gets the containing metadata from the initial GetObject response. /// Available after completes successfully. /// - public DownloadDiscoveryResult DiscoveryResult => _discoveryResult; + public DownloadResult DiscoveryResult => _discoveryResult; /// /// Creates a new with dependency injection. 
@@ -112,16 +112,14 @@ public async Task InitializeAsync(CancellationToken cancellationToken) _logger.DebugFormat("BufferedMultipartStream: Starting initialization"); - _discoveryResult = await _downloadCoordinator.DiscoverDownloadStrategyAsync(cancellationToken) - .ConfigureAwait(false); - - _logger.DebugFormat("BufferedMultipartStream: Discovery completed - ObjectSize={0}, TotalParts={1}, IsSinglePart={2}", + // Start unified download operation (discovers strategy and starts downloads) + _discoveryResult = await _downloadCoordinator.StartDownloadAsync(null, cancellationToken) + .ConfigureAwait(false); + + _logger.DebugFormat("BufferedMultipartStream: Download started - ObjectSize={0}, TotalParts={1}, IsSinglePart={2}", _discoveryResult.ObjectSize, _discoveryResult.TotalParts, _discoveryResult.IsSinglePart); - - await _downloadCoordinator.StartDownloadsAsync(_discoveryResult, null, cancellationToken) - .ConfigureAwait(false); _initialized = true; _logger.DebugFormat("BufferedMultipartStream: Initialization completed successfully"); diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedPartDataHandler.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedPartDataHandler.cs index 256a0228d086..2bb4cf198eb6 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedPartDataHandler.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedPartDataHandler.cs @@ -71,7 +71,7 @@ public BufferedPartDataHandler( _config = config ?? throw new ArgumentNullException(nameof(config)); } - public Task PrepareAsync(DownloadDiscoveryResult discoveryResult, CancellationToken cancellationToken) + public Task PrepareAsync(DownloadResult discoveryResult, CancellationToken cancellationToken) { // No preparation needed for buffered handler - buffers are created on demand return Task.CompletedTask; diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/FilePartDataHandler.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/FilePartDataHandler.cs index 4d7415a4a8f5..d85556a34ecc 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/FilePartDataHandler.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/FilePartDataHandler.cs @@ -55,7 +55,7 @@ public FilePartDataHandler(FileDownloadConfiguration config) } /// - public Task PrepareAsync(DownloadDiscoveryResult discoveryResult, CancellationToken cancellationToken) + public Task PrepareAsync(DownloadResult discoveryResult, CancellationToken cancellationToken) { // Create temporary file once during preparation phase _tempFilePath = _fileHandler.CreateTemporaryFile(_config.DestinationFilePath); diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/IDownloadManager.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/IDownloadManager.cs index 86f7240988a1..662076ded6da 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/IDownloadManager.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/IDownloadManager.cs @@ -32,23 +32,29 @@ namespace Amazon.S3.Transfer.Internal internal interface IDownloadManager : IDisposable { /// - /// Discovers whether the object requires single-part or multipart downloading. + /// Discovers the download strategy and starts concurrent downloads in a single operation. + /// This unified method eliminates resource leakage by managing HTTP slots and buffer capacity + /// internally throughout the entire download lifecycle. /// - /// A token to cancel the discovery operation. - /// - /// A task containing discovery results including total parts, object size, - /// and initial response data if single-part. 
- /// - Task DiscoverDownloadStrategyAsync(CancellationToken cancellationToken); - - /// - /// Starts concurrent downloads with HTTP concurrency control and part range calculations. - /// - /// Results from the discovery phase. /// Optional callback for progress tracking events. /// A token to cancel the download operation. - /// A task that completes when all downloads finish or an error occurs. - Task StartDownloadsAsync(DownloadDiscoveryResult discoveryResult, EventHandler progressCallback, CancellationToken cancellationToken); + /// + /// A task containing download results including total parts, object size, + /// and initial response data. + /// + /// + /// This method performs both discovery and download operations atomically: + /// 1. Acquires HTTP slot and buffer capacity + /// 2. Makes initial GetObject request to discover download strategy + /// 3. Processes Part 1 immediately + /// 4. Starts background downloads for remaining parts (if multipart) + /// 5. Returns after Part 1 is processed, allowing consumer to begin reading + /// + /// Resources (HTTP slots, buffer capacity) are managed internally and released + /// at the appropriate times, eliminating the awkward resource holding that existed + /// with the previous two-method API. + /// + Task StartDownloadAsync(EventHandler progressCallback, CancellationToken cancellationToken); /// /// Exception that occurred during downloads, if any. @@ -57,9 +63,9 @@ internal interface IDownloadManager : IDisposable } /// - /// Download discovery results with metadata for determining download strategy. + /// Download results with metadata about the completed discovery and initial download. /// - internal class DownloadDiscoveryResult + internal class DownloadResult { /// /// Total parts needed (1 = single-part, >1 = multipart). @@ -72,7 +78,8 @@ internal class DownloadDiscoveryResult public long ObjectSize { get; set; } /// - /// GetObjectResponse obtained during download initialization, containing the ResponseStream. Represents the complete object for single-part downloads or the first range/part for multipart downloads. + /// GetObjectResponse obtained during download initialization, containing the ResponseStream. + /// Represents the complete object for single-part downloads or the first range/part for multipart downloads. /// public GetObjectResponse InitialResponse { get; set; } diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/IPartDataHandler.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/IPartDataHandler.cs index 864a49acbaa7..43cdce2075f6 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/IPartDataHandler.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/IPartDataHandler.cs @@ -40,7 +40,7 @@ internal interface IPartDataHandler : IDisposable /// Discovery result containing object metadata /// Cancellation token /// Task that completes when preparation is done - Task PrepareAsync(DownloadDiscoveryResult discoveryResult, CancellationToken cancellationToken); + Task PrepareAsync(DownloadResult discoveryResult, CancellationToken cancellationToken); /// /// Process a downloaded part from the GetObjectResponse. 
diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs index ecdcde369441..ef50b5f4a7bf 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs @@ -133,8 +133,7 @@ public MultipartDownloadManager(IAmazonS3 s3Client, BaseDownloadRequest request, /// /// /// - /// - /// + /// /// Thrown when using S3 encryption client, which does not support multipart downloads. public MultipartDownloadManager(IAmazonS3 s3Client, BaseDownloadRequest request, DownloadManagerConfiguration config, IPartDataHandler dataHandler, RequestEventHandler requestEventHandler, SemaphoreSlim sharedHttpThrottler) { @@ -177,42 +176,61 @@ public Exception DownloadException } /// - /// Discovers the download strategy (single-part vs multipart) by making an initial GetObject request. + /// Discovers the download strategy and starts concurrent downloads in a single unified operation. + /// This eliminates resource leakage by managing HTTP slots and buffer capacity internally. /// - /// Cancellation token to cancel the discovery operation. + /// Optional callback for progress tracking events. + /// A token to cancel the download operation. /// - /// A containing information about the object size, part count, + /// A containing information about the object size, part count, /// and the initial GetObject response. /// /// - /// IMPORTANT - HTTP Semaphore Lifecycle: - /// - /// This method acquires an HTTP concurrency slot from the configured semaphore and downloads Part 1. - /// The semaphore slot is HELD until completes processing Part 1. - /// Callers MUST call after this method to release the semaphore. - /// Failure to call will cause the semaphore slot to remain held indefinitely, - /// potentially blocking other downloads and causing deadlocks. - /// - /// Concurrency Implications: - /// - /// With limited HTTP concurrency (e.g., ConcurrentServiceRequests=1 for shared throttlers in directory downloads), - /// concurrent calls to this method will block until previous downloads complete their full lifecycle - /// (discover → start). This is by design to ensure the entire I/O operation (network + disk) is - /// within the concurrency limit. For single-slot throttlers, downloads must be processed sequentially: - /// complete one download's full lifecycle before starting the next. - /// - /// Typical Usage Pattern: - /// - /// var discovery = await manager.DiscoverDownloadStrategyAsync(cancellationToken); - /// await manager.StartDownloadsAsync(discovery, progressCallback, cancellationToken); - /// await manager.DownloadCompletionTask; // Wait for multipart downloads to finish - /// + /// This method performs both discovery and download operations atomically: + /// 1. Acquires HTTP slot and buffer capacity + /// 2. Makes initial GetObject request to discover download strategy + /// 3. Processes Part 1 immediately + /// 4. Starts background downloads for remaining parts (if multipart) + /// 5. Returns after Part 1 is processed, allowing consumer to begin reading + /// + /// Resources (HTTP slots, buffer capacity) are managed internally and released + /// at the appropriate times /// /// Thrown if the manager has been disposed. - /// Thrown if discovery has already been performed. + /// Thrown if download has already been started. /// Thrown if the operation is cancelled. 
/// - public async Task DiscoverDownloadStrategyAsync(CancellationToken cancellationToken) + public async Task StartDownloadAsync(EventHandler progressCallback, CancellationToken cancellationToken) + { + ThrowIfDisposed(); + + if (_discoveryCompleted) + throw new InvalidOperationException("Download has already been started"); + + // Step 1: Perform discovery (acquires resources, downloads Part 1) + var discoveryResult = await PerformDiscoveryAsync(cancellationToken).ConfigureAwait(false); + + // Step 2: Process Part 1 and start remaining downloads + await PerformDownloadsAsync(discoveryResult, progressCallback, cancellationToken).ConfigureAwait(false); + + // Step 3: Return results to caller + return discoveryResult; + } + + /// + /// Performs the discovery phase by making an initial GetObject request. + /// + /// Cancellation token to cancel the discovery operation. + /// + /// A containing information about the object size, part count, + /// and the initial GetObject response. + /// + /// + /// This method acquires an HTTP concurrency slot and buffer capacity, then makes the initial + /// GetObject request to determine the download strategy. The HTTP slot is held until + /// PerformDownloadsAsync processes Part 1. + /// + private async Task PerformDiscoveryAsync(CancellationToken cancellationToken) { ThrowIfDisposed(); @@ -251,9 +269,8 @@ public async Task DiscoverDownloadStrategyAsync(Cancell /// Processes Part 1 and starts downloading remaining parts for multipart downloads. /// Returns immediately after processing Part 1 to allow the consumer to begin reading. /// - /// - /// The discovery result from containing object metadata - /// and the initial GetObject response. + /// + /// The download result from discovery containing object metadata and the initial GetObject response. /// /// /// Optional progress callback that will be invoked as parts are downloaded. For multipart downloads, @@ -265,46 +282,22 @@ public async Task DiscoverDownloadStrategyAsync(Cancell /// continue downloading in the background (monitor via ). /// /// - /// HTTP Semaphore Release: - /// - /// This method processes Part 1 (downloaded during ) - /// and releases the HTTP semaphore slot that was acquired during discovery. - /// The semaphore is released after both the network download and disk write - /// operations complete for Part 1. This ensures the ConcurrentServiceRequests limit - /// controls the entire I/O operation (network + disk), not just the network download. - /// - /// Background Processing (Multipart Only): - /// - /// For multipart downloads (when TotalParts > 1), this method starts a background task - /// to download and process remaining parts (Part 2+) and returns immediately. This allows the - /// consumer to start reading from the buffer without waiting for all downloads to complete, - /// which prevents deadlocks when the buffer fills up before the consumer begins reading. - /// Monitor to detect when all background downloads have finished. - /// - /// Single-Part Downloads: - /// - /// For single-part downloads (when TotalParts = 1), this method processes Part 1 synchronously - /// and returns immediately. No background task is created, and - /// will already be completed when this method returns. - /// + /// This is a private method called by StartDownloadAsync after discovery completes. + /// It processes Part 1 and starts background downloads for remaining parts. /// - /// Thrown if the manager has been disposed. - /// Thrown if is null. - /// Thrown if the operation is cancelled. 
- /// - public async Task StartDownloadsAsync(DownloadDiscoveryResult discoveryResult, EventHandler progressCallback, CancellationToken cancellationToken) + private async Task PerformDownloadsAsync(DownloadResult downloadResult, EventHandler progressCallback, CancellationToken cancellationToken) { ThrowIfDisposed(); - if (discoveryResult == null) - throw new ArgumentNullException(nameof(discoveryResult)); + if (downloadResult == null) + throw new ArgumentNullException(nameof(downloadResult)); // Store for progress aggregation _userProgressCallback = progressCallback; - _totalObjectSize = discoveryResult.ObjectSize; + _totalObjectSize = downloadResult.ObjectSize; _logger.DebugFormat("MultipartDownloadManager: Starting downloads - TotalParts={0}, IsSinglePart={1}", - discoveryResult.TotalParts, discoveryResult.IsSinglePart); + downloadResult.TotalParts, downloadResult.IsSinglePart); var internalCts = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken); @@ -316,9 +309,9 @@ public async Task StartDownloadsAsync(DownloadDiscoveryResult discoveryResult, E : null; // Process Part 1 (downloaded during discovery) - await ProcessFirstPartAsync(discoveryResult, wrappedCallback, cancellationToken).ConfigureAwait(false); + await ProcessFirstPartAsync(downloadResult, wrappedCallback, cancellationToken).ConfigureAwait(false); - if (discoveryResult.IsSinglePart) + if (downloadResult.IsSinglePart) { // Single-part: Part 1 is the entire object _logger.DebugFormat("MultipartDownloadManager: Single-part download complete"); @@ -334,7 +327,7 @@ public async Task StartDownloadsAsync(DownloadDiscoveryResult discoveryResult, E // which prevents deadlock when MaxInMemoryParts is reached before consumer begins reading _downloadCompletionTask = Task.Run(async () => { - await StartBackgroundDownloadsAsync(discoveryResult, wrappedCallback, internalCts).ConfigureAwait(false); + await StartBackgroundDownloadsAsync(downloadResult, wrappedCallback, internalCts).ConfigureAwait(false); }, cancellationToken); // Return immediately to allow consumer to start reading @@ -361,23 +354,23 @@ public async Task StartDownloadsAsync(DownloadDiscoveryResult discoveryResult, E /// /// Processes Part 1 (downloaded during discovery) including preparation, progress tracking, and semaphore release. 
@@ -361,23 +354,23 @@ public async Task StartDownloadsAsync(DownloadDiscoveryResult discoveryResult, E
         /// <summary>
         /// Processes Part 1 (downloaded during discovery) including preparation, progress tracking, and semaphore release.
         /// </summary>
-        private async Task ProcessFirstPartAsync(DownloadDiscoveryResult discoveryResult, EventHandler<WriteObjectProgressArgs> wrappedCallback, CancellationToken cancellationToken)
+        private async Task ProcessFirstPartAsync(DownloadResult downloadResult, EventHandler<WriteObjectProgressArgs> wrappedCallback, CancellationToken cancellationToken)
         {
             try
             {
                 // Prepare the data handler (e.g., create temp files for file-based downloads)
-                await _dataHandler.PrepareAsync(discoveryResult, cancellationToken).ConfigureAwait(false);
+                await _dataHandler.PrepareAsync(downloadResult, cancellationToken).ConfigureAwait(false);

                 // Attach progress callback to Part 1's response if provided
                 if (wrappedCallback != null)
                 {
-                    discoveryResult.InitialResponse.WriteObjectProgressEvent += wrappedCallback;
+                    downloadResult.InitialResponse.WriteObjectProgressEvent += wrappedCallback;
                 }

                 // Process Part 1 from InitialResponse (applies to both single-part and multipart)
                 // NOTE: Semaphore is still held from discovery phase and will be released in finally block
                 _logger.DebugFormat("MultipartDownloadManager: Processing Part 1 from discovery response");
-                await _dataHandler.ProcessPartAsync(1, discoveryResult.InitialResponse, cancellationToken).ConfigureAwait(false);
+                await _dataHandler.ProcessPartAsync(1, downloadResult.InitialResponse, cancellationToken).ConfigureAwait(false);

                 _logger.DebugFormat("MultipartDownloadManager: Part 1 processing completed");
             }
@@ -386,7 +379,7 @@ private async Task ProcessFirstPartAsync(DownloadDiscoveryResult discoveryResult
                 // Always detach the event handler to prevent memory leak
                 if (wrappedCallback != null)
                 {
-                    discoveryResult.InitialResponse.WriteObjectProgressEvent -= wrappedCallback;
+                    downloadResult.InitialResponse.WriteObjectProgressEvent -= wrappedCallback;
                 }

                 // Release semaphore after BOTH network download AND disk write complete for Part 1
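
The try/finally shape above is the crux of the semaphore handling for Part 1. A self-contained sketch of the same pattern, with the semaphore and streams as assumed stand-ins for the manager's private members:

    using System.IO;
    using System.Threading;
    using System.Threading.Tasks;

    public static class FirstPartSemaphoreSketch
    {
        // The HTTP slot was acquired during the discovery phase and is still held here.
        public static async Task ProcessFirstPartAsync(
            SemaphoreSlim httpConcurrencySlot,
            Stream partBody,
            Stream destination,
            CancellationToken ct)
        {
            try
            {
                // Network read and disk write both happen while the slot is held.
                await partBody.CopyToAsync(destination, 81920, ct).ConfigureAwait(false);
            }
            finally
            {
                // Release only after BOTH complete, so ConcurrentServiceRequests
                // bounds the whole I/O operation, not just the network transfer.
                httpConcurrencySlot.Release();
            }
        }
    }
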
@@ -402,7 +395,7 @@ private async Task ProcessFirstPartAsync(DownloadDiscoveryResult discoveryResult
         /// <summary>
         /// Starts background downloads for remaining parts (Part 2+) in a multipart download.
         /// Handles capacity acquisition, task creation, completion validation, and error handling.
         /// </summary>
-        private async Task StartBackgroundDownloadsAsync(DownloadDiscoveryResult discoveryResult, EventHandler<WriteObjectProgressArgs> wrappedCallback, CancellationTokenSource internalCts)
+        private async Task StartBackgroundDownloadsAsync(DownloadResult downloadResult, EventHandler<WriteObjectProgressArgs> wrappedCallback, CancellationTokenSource internalCts)
         {
             var downloadTasks = new List<Task>();
@@ -412,10 +405,10 @@ private async Task StartBackgroundDownloadsAsync(DownloadDiscoveryResult discove
             // Multipart: Start concurrent downloads for remaining parts (Part 2 onwards)
             _logger.InfoFormat("MultipartDownloadManager: Starting concurrent downloads for parts 2-{0}",
-                discoveryResult.TotalParts);
+                downloadResult.TotalParts);

             // Create download tasks for all remaining parts
-            await CreateDownloadTasksAsync(discoveryResult, wrappedCallback, internalCts, downloadTasks).ConfigureAwait(false);
+            await CreateDownloadTasksAsync(downloadResult, wrappedCallback, internalCts, downloadTasks).ConfigureAwait(false);

             var expectedTaskCount = downloadTasks.Count;
             _logger.DebugFormat("MultipartDownloadManager: Background task waiting for {0} download tasks", expectedTaskCount);
@@ -426,11 +419,11 @@ private async Task StartBackgroundDownloadsAsync(DownloadDiscoveryResult discove
             _logger.DebugFormat("MultipartDownloadManager: All download tasks completed successfully");

             // Validate completion and mark successful
-            ValidateDownloadCompletion(expectedTaskCount, discoveryResult.TotalParts);
+            ValidateDownloadCompletion(expectedTaskCount, downloadResult.TotalParts);

             // Mark successful completion
             _logger.InfoFormat("MultipartDownloadManager: Download completed successfully - TotalParts={0}",
-                discoveryResult.TotalParts);
+                downloadResult.TotalParts);
             _dataHandler.OnDownloadComplete(null);
         }
#pragma warning disable CA1031 // Do not catch general exception types
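
A sketch of the fire-and-track pattern this method relies on: the background work runs inside Task.Run, failures are surfaced through a completion callback rather than crashing the worker, and callers observe the outcome via the stored task. The two delegate parameters are assumptions standing in for the manager's private members:

    using System;
    using System.Threading;
    using System.Threading.Tasks;

    public sealed class BackgroundCompletionSketch
    {
        public Task DownloadCompletionTask { get; private set; } = Task.CompletedTask;

        public void StartBackground(
            Func<CancellationToken, Task> downloadRemainingPartsAsync,
            Action<Exception> onDownloadComplete,
            CancellationToken ct)
        {
            DownloadCompletionTask = Task.Run(async () =>
            {
                try
                {
                    await downloadRemainingPartsAsync(ct).ConfigureAwait(false);
                    onDownloadComplete(null);   // success path
                }
                catch (Exception ex)
                {
                    onDownloadComplete(ex);     // notify the data handler of the failure
                    throw;                      // keep the task observably faulted
                }
            }, ct);
        }
    }
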
@@ -453,12 +446,12 @@ private async Task StartBackgroundDownloadsAsync(DownloadDiscoveryResult discove
         /// <summary>
         /// Creates download tasks for all remaining parts (Part 2+) with sequential capacity acquisition.
         /// Pre-acquires capacity in sequential order to prevent race condition deadlock.
         /// </summary>
-        private async Task CreateDownloadTasksAsync(DownloadDiscoveryResult discoveryResult, EventHandler<WriteObjectProgressArgs> wrappedCallback, CancellationTokenSource internalCts, List<Task> downloadTasks)
+        private async Task CreateDownloadTasksAsync(DownloadResult downloadResult, EventHandler<WriteObjectProgressArgs> wrappedCallback, CancellationTokenSource internalCts, List<Task> downloadTasks)
         {
             // Pre-acquire capacity in sequential order to prevent race condition deadlock
             // This ensures Part 2 gets capacity before Part 3, etc., preventing out-of-order
             // parts from consuming all buffer slots and blocking the next expected part
-            for (int partNum = 2; partNum <= discoveryResult.TotalParts; partNum++)
+            for (int partNum = 2; partNum <= downloadResult.TotalParts; partNum++)
             {
                 _logger.DebugFormat("MultipartDownloadManager: [Part {0}] Waiting for buffer space", partNum);
@@ -478,7 +471,7 @@
                 try
                 {
-                    var task = CreateDownloadTaskAsync(partNum, discoveryResult.ObjectSize, wrappedCallback, internalCts.Token);
+                    var task = CreateDownloadTaskAsync(partNum, downloadResult.ObjectSize, wrappedCallback, internalCts.Token);
                     downloadTasks.Add(task);
                 }
                 catch (Exception ex)
@@ -642,7 +635,7 @@ private async Task CreateDownloadTaskAsync(int partNumber, long objectSize, Even
         }

-        private async Task<DownloadDiscoveryResult> DiscoverUsingPartStrategyAsync(CancellationToken cancellationToken)
+        private async Task<DownloadResult> DiscoverUsingPartStrategyAsync(CancellationToken cancellationToken)
         {
             // Check for cancellation before making any S3 calls
             cancellationToken.ThrowIfCancellationRequested();
@@ -693,7 +686,7 @@ private async Task<DownloadDiscoveryResult> DiscoverUsingPartStrategyAsync(Cance
                 // SEP Part GET Step 7 will use this response for creating DownloadResponse
                 // Keep the response with its stream (will be buffered in StartDownloadsAsync)
-                return new DownloadDiscoveryResult
+                return new DownloadResult
                 {
                     TotalParts = firstPartResponse.PartsCount.Value,
                     ObjectSize = totalObjectSize,
@@ -706,7 +699,7 @@ private async Task<DownloadDiscoveryResult> DiscoverUsingPartStrategyAsync(Cance
                 _discoveredPartCount = 1;

                 // Single part upload - return the response for immediate use (SEP Step 7)
-                return new DownloadDiscoveryResult
+                return new DownloadResult
                 {
                     TotalParts = 1,
                     ObjectSize = firstPartResponse.ContentLength,
@@ -723,7 +716,7 @@ private async Task<DownloadDiscoveryResult> DiscoverUsingPartStrategyAsync(Cance
             }
         }

-        private async Task<DownloadDiscoveryResult> DiscoverUsingRangeStrategyAsync(CancellationToken cancellationToken)
+        private async Task<DownloadResult> DiscoverUsingRangeStrategyAsync(CancellationToken cancellationToken)
         {
             // Check for cancellation before making any S3 calls
             cancellationToken.ThrowIfCancellationRequested();
@@ -771,7 +764,7 @@ private async Task<DownloadDiscoveryResult> DiscoverUsingRangeStrategyAsync(Canc
                 // No ContentRange means we got the entire small object
                 _discoveredPartCount = 1;

-                return new DownloadDiscoveryResult
+                return new DownloadResult
                 {
                     TotalParts = 1,
                     ObjectSize = firstRangeResponse.ContentLength,
@@ -792,7 +785,7 @@ private async Task<DownloadDiscoveryResult> DiscoverUsingRangeStrategyAsync(Canc
                 // This request contains all of the data
                 _discoveredPartCount = 1;

-                return new DownloadDiscoveryResult
+                return new DownloadResult
                 {
                     TotalParts = 1,
                     ObjectSize = totalContentLength,
@@ -815,7 +808,7 @@ private async Task<DownloadDiscoveryResult> DiscoverUsingRangeStrategyAsync(Canc
                 // SEP Ranged GET Step 9 will use this response for creating DownloadResponse
                 // Keep the response with its stream (will be buffered in StartDownloadsAsync)
-                return new DownloadDiscoveryResult
+                return new DownloadResult
                 {
                     TotalParts = _discoveredPartCount,
                     ObjectSize = totalContentLength,
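
Before the next file, a compact sketch of the sequential pre-acquisition loop in CreateDownloadTasksAsync above: buffer capacity is awaited in strict part order (2, 3, ...) before each task starts, so an out-of-order part can never hold the slot the next expected part is waiting on. The two delegates are assumed stand-ins for the data handler's capacity gate and the per-part download:

    using System;
    using System.Collections.Generic;
    using System.Threading;
    using System.Threading.Tasks;

    public static class SequentialCapacitySketch
    {
        public static async Task<List<Task>> CreateTasksInOrderAsync(
            Func<CancellationToken, Task> waitForCapacityAsync,
            Func<int, CancellationToken, Task> downloadPartAsync,
            int totalParts,
            CancellationToken ct)
        {
            var downloadTasks = new List<Task>();
            for (int partNum = 2; partNum <= totalParts; partNum++)
            {
                // Acquire in part order: Part 3 cannot claim a slot before Part 2 has one.
                await waitForCapacityAsync(ct).ConfigureAwait(false);
                downloadTasks.Add(downloadPartAsync(partNum, ct));
            }
            return downloadTasks;
        }
    }
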
diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartDownloadCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartDownloadCommand.async.cs
index cc58ffbbadac..c0b0a99c8709 100644
--- a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartDownloadCommand.async.cs
+++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartDownloadCommand.async.cs
@@ -62,23 +62,17 @@ public override async Task ExecuteAsync(Cancell
             long totalBytes = -1;
             try
             {
-                // Step 1: Discover download strategy (PART or RANGE) and get metadata
-                _logger.DebugFormat("MultipartDownloadCommand: Discovering download strategy");
-                var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(cancellationToken)
+                // Start unified download operation (discovers strategy and starts downloads)
+                _logger.DebugFormat("MultipartDownloadCommand: Starting unified download operation");
+                var downloadResult = await coordinator.StartDownloadAsync(DownloadPartProgressEventCallback, cancellationToken)
                     .ConfigureAwait(false);

-                totalBytes = discoveryResult.ObjectSize;
-
-
-                _logger.DebugFormat("MultipartDownloadCommand: Discovered {0} part(s), total size: {1} bytes, IsSinglePart={2}",
-                    discoveryResult.TotalParts, discoveryResult.ObjectSize, discoveryResult.IsSinglePart);
+                totalBytes = downloadResult.ObjectSize;

-                // Step 2: Start concurrent downloads for all parts
-                _logger.DebugFormat("Starting downloads for {0} part(s)", discoveryResult.TotalParts);
-                await coordinator.StartDownloadsAsync(discoveryResult, DownloadPartProgressEventCallback, cancellationToken)
-                    .ConfigureAwait(false);
+                _logger.DebugFormat("MultipartDownloadCommand: Downloaded {0} part(s), total size: {1} bytes, IsSinglePart={2}",
+                    downloadResult.TotalParts, downloadResult.ObjectSize, downloadResult.IsSinglePart);

-                // Step 2b: Wait for all downloads to complete before returning
+                // Wait for all downloads to complete before returning
                 // This ensures file is fully written and committed for file-based downloads
                 // For stream-based downloads, this task completes immediately (no-op)
                 _logger.DebugFormat("MultipartDownloadCommand: Waiting for download completion");
@@ -86,23 +80,23 @@ await coordinator.StartDownloadsAsync(discoveryResult, DownloadPartProgressEvent

                 _logger.DebugFormat("MultipartDownloadCommand: Completed multipart download");

-                // Step 3: Map the response from the initial GetObject response
+                // Map the response from the initial GetObject response
                 // The initial response contains all the metadata we need
-                var mappedResponse = ResponseMapper.MapGetObjectResponse(discoveryResult.InitialResponse);
+                var mappedResponse = ResponseMapper.MapGetObjectResponse(downloadResult.InitialResponse);

                 // SEP Part GET Step 7 / Ranged GET Step 9:
                 // Set ContentLength to total object size (not just first part)
-                mappedResponse.Headers.ContentLength = discoveryResult.ObjectSize;
+                mappedResponse.Headers.ContentLength = downloadResult.ObjectSize;

                 // Set ContentRange to represent the entire object: bytes 0-(ContentLength-1)/ContentLength
                 // S3 returns null for 0-byte objects, so we match that behavior
-                if (discoveryResult.ObjectSize == 0)
+                if (downloadResult.ObjectSize == 0)
                 {
                     mappedResponse.ContentRange = null;
                 }
                 else
                 {
-                    mappedResponse.ContentRange = $"bytes 0-{discoveryResult.ObjectSize - 1}/{discoveryResult.ObjectSize}";
+                    mappedResponse.ContentRange = $"bytes 0-{downloadResult.ObjectSize - 1}/{downloadResult.ObjectSize}";
                 }

                 // SEP Part GET Step 7 / Ranged GET Step 9:
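
The ContentRange normalization above is small enough to state as a function. A sketch grounded directly in the mapping logic this hunk adds (the helper name is illustrative, not SDK API):

    public static class ContentRangeSketch
    {
        // The mapped response must advertise the WHOLE object, not just Part 1;
        // S3 returns no ContentRange for 0-byte objects, so null matches that.
        public static string BuildWholeObjectContentRange(long objectSize)
        {
            if (objectSize == 0)
                return null;
            return $"bytes 0-{objectSize - 1}/{objectSize}";
        }
    }

    // BuildWholeObjectContentRange(1024) -> "bytes 0-1023/1024"
    // BuildWholeObjectContentRange(0)    -> null

diff --git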
a/sdk/test/Services/S3/UnitTests/Custom/BufferedMultipartStreamTests.cs b/sdk/test/Services/S3/UnitTests/Custom/BufferedMultipartStreamTests.cs index bf0e14a6dda3..29f966f34569 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/BufferedMultipartStreamTests.cs +++ b/sdk/test/Services/S3/UnitTests/Custom/BufferedMultipartStreamTests.cs @@ -64,17 +64,16 @@ private async Task CreateInitializedStreamAsync( ? MultipartDownloadTestHelpers.CreateSinglePartResponse(objectSize) : new GetObjectResponse(); - var discoveryResult = new DownloadDiscoveryResult + var discoveryResult = new DownloadResult { TotalParts = totalParts, ObjectSize = objectSize, InitialResponse = mockResponse }; - _mockCoordinator.Setup(x => x.DiscoverDownloadStrategyAsync(It.IsAny())) + _mockCoordinator.Setup(x => x.StartDownloadAsync( + It.IsAny>(), It.IsAny())) .ReturnsAsync(discoveryResult); - _mockCoordinator.Setup(x => x.StartDownloadsAsync(It.IsAny(), It.IsAny>(), It.IsAny())) - .Returns(Task.CompletedTask); var stream = CreateStream(); await stream.InitializeAsync(CancellationToken.None); @@ -157,11 +156,11 @@ public void Create_WithNullParameter_ThrowsArgumentNullException( #region InitializeAsync Tests - Single Part [TestMethod] - public async Task InitializeAsync_SinglePart_UsesSinglePartHandler() + public async Task InitializeAsync_SinglePart_SetsCorrectDiscoveryResult() { // Arrange var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse(1024); - var discoveryResult = new DownloadDiscoveryResult + var discoveryResult = new DownloadResult { TotalParts = 1, ObjectSize = 1024, @@ -169,7 +168,8 @@ public async Task InitializeAsync_SinglePart_UsesSinglePartHandler() }; var mockCoordinator = new Mock(); - mockCoordinator.Setup(x => x.DiscoverDownloadStrategyAsync(It.IsAny())) + mockCoordinator.Setup(x => x.StartDownloadAsync( + It.IsAny>(), It.IsAny())) .ReturnsAsync(discoveryResult); var mockBufferManager = new Mock(); @@ -182,25 +182,25 @@ public async Task InitializeAsync_SinglePart_UsesSinglePartHandler() // Assert Assert.IsNotNull(stream.DiscoveryResult); Assert.AreEqual(1, stream.DiscoveryResult.TotalParts); + Assert.AreEqual(1024, stream.DiscoveryResult.ObjectSize); } + [TestMethod] - public async Task InitializeAsync_SinglePart_CallsStartDownloads() + public async Task InitializeAsync_Multipart_UsesMultipartHandler() { // Arrange - var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse(1024); - var discoveryResult = new DownloadDiscoveryResult + var discoveryResult = new DownloadResult { - TotalParts = 1, - ObjectSize = 1024, - InitialResponse = mockResponse + TotalParts = 5, + ObjectSize = 50 * 1024 * 1024, + InitialResponse = new GetObjectResponse() }; var mockCoordinator = new Mock(); - mockCoordinator.Setup(x => x.DiscoverDownloadStrategyAsync(It.IsAny())) + mockCoordinator.Setup(x => x.StartDownloadAsync( + It.IsAny>(), It.IsAny())) .ReturnsAsync(discoveryResult); - mockCoordinator.Setup(x => x.StartDownloadsAsync(It.IsAny(), It.IsAny>(), It.IsAny())) - .Returns(Task.CompletedTask); var mockBufferManager = new Mock(); var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); @@ -210,31 +210,25 @@ public async Task InitializeAsync_SinglePart_CallsStartDownloads() await stream.InitializeAsync(CancellationToken.None); // Assert - mockCoordinator.Verify( - x => x.StartDownloadsAsync(discoveryResult, It.IsAny>(), It.IsAny()), - Times.Once); + Assert.AreEqual(5, stream.DiscoveryResult.TotalParts); } - #endregion - - #region InitializeAsync Tests - Multipart - 
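
The recurring mock arrangement in these tests collapses the old two-step setup (DiscoverDownloadStrategyAsync plus StartDownloadsAsync) into one stub. A representative sketch with the generic arguments written out, reusing the IDownloadCoordinator/DownloadResult stand-ins sketched earlier in this patch (the interface name is an assumption; the mocked type's generics are stripped in the hunks above):

    using System;
    using System.Threading;
    using Amazon.S3.Model;
    using Moq;

    public static class CoordinatorMockSketch
    {
        public static Mock<IDownloadCoordinator> Create(DownloadResult discoveryResult)
        {
            var mockCoordinator = new Mock<IDownloadCoordinator>();

            // One stub now stands in for the former discover + start pair.
            mockCoordinator
                .Setup(x => x.StartDownloadAsync(
                    It.IsAny<EventHandler<WriteObjectProgressArgs>>(),
                    It.IsAny<CancellationToken>()))
                .ReturnsAsync(discoveryResult);

            return mockCoordinator;
        }
    }
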
[TestMethod] - public async Task InitializeAsync_Multipart_UsesMultipartHandler() + public async Task InitializeAsync_SinglePart_CallsStartDownloads() { // Arrange - var discoveryResult = new DownloadDiscoveryResult + var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse(1024); + var discoveryResult = new DownloadResult { - TotalParts = 5, - ObjectSize = 50 * 1024 * 1024, - InitialResponse = new GetObjectResponse() + TotalParts = 1, + ObjectSize = 1024, + InitialResponse = mockResponse }; var mockCoordinator = new Mock(); - mockCoordinator.Setup(x => x.DiscoverDownloadStrategyAsync(It.IsAny())) + mockCoordinator.Setup(x => x.StartDownloadAsync( + It.IsAny>(), It.IsAny())) .ReturnsAsync(discoveryResult); - mockCoordinator.Setup(x => x.StartDownloadsAsync(It.IsAny(), It.IsAny>(), It.IsAny())) - .Returns(Task.CompletedTask); var mockBufferManager = new Mock(); var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); @@ -244,14 +238,21 @@ public async Task InitializeAsync_Multipart_UsesMultipartHandler() await stream.InitializeAsync(CancellationToken.None); // Assert - Assert.AreEqual(5, stream.DiscoveryResult.TotalParts); + mockCoordinator.Verify( + x => x.StartDownloadAsync( + It.IsAny>(), It.IsAny()), + Times.Once); } + #endregion + + #region InitializeAsync Tests - Multipart + [TestMethod] public async Task InitializeAsync_Multipart_StartsDownloads() { // Arrange - var discoveryResult = new DownloadDiscoveryResult + var discoveryResult = new DownloadResult { TotalParts = 5, ObjectSize = 50 * 1024 * 1024, @@ -259,10 +260,9 @@ public async Task InitializeAsync_Multipart_StartsDownloads() }; var mockCoordinator = new Mock(); - mockCoordinator.Setup(x => x.DiscoverDownloadStrategyAsync(It.IsAny())) + mockCoordinator.Setup(x => x.StartDownloadAsync( + It.IsAny>(), It.IsAny())) .ReturnsAsync(discoveryResult); - mockCoordinator.Setup(x => x.StartDownloadsAsync(It.IsAny(), It.IsAny>(), It.IsAny())) - .Returns(Task.CompletedTask); var mockBufferManager = new Mock(); var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); @@ -273,7 +273,8 @@ public async Task InitializeAsync_Multipart_StartsDownloads() // Assert mockCoordinator.Verify( - x => x.StartDownloadsAsync(discoveryResult, It.IsAny>(), It.IsAny()), + x => x.StartDownloadAsync( + It.IsAny>(), It.IsAny()), Times.Once); } @@ -285,7 +286,7 @@ public async Task InitializeAsync_Multipart_StartsDownloads() public async Task InitializeAsync_SetsDiscoveryResult() { // Arrange - var discoveryResult = new DownloadDiscoveryResult + var discoveryResult = new DownloadResult { TotalParts = 1, ObjectSize = 1024, @@ -293,7 +294,8 @@ public async Task InitializeAsync_SetsDiscoveryResult() }; var mockCoordinator = new Mock(); - mockCoordinator.Setup(x => x.DiscoverDownloadStrategyAsync(It.IsAny())) + mockCoordinator.Setup(x => x.StartDownloadAsync( + It.IsAny>(), It.IsAny())) .ReturnsAsync(discoveryResult); var mockBufferManager = new Mock(); @@ -314,7 +316,7 @@ public async Task InitializeAsync_CalledTwice_ThrowsInvalidOperationException() { // Arrange var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse(1024); - var discoveryResult = new DownloadDiscoveryResult + var discoveryResult = new DownloadResult { TotalParts = 1, ObjectSize = 1024, @@ -322,7 +324,8 @@ public async Task InitializeAsync_CalledTwice_ThrowsInvalidOperationException() }; var mockCoordinator = new Mock(); - mockCoordinator.Setup(x => x.DiscoverDownloadStrategyAsync(It.IsAny())) + mockCoordinator.Setup(x 
=> x.StartDownloadAsync( + It.IsAny>(), It.IsAny())) .ReturnsAsync(discoveryResult); var mockBufferManager = new Mock(); diff --git a/sdk/test/Services/S3/UnitTests/Custom/FilePartDataHandlerConcurrencyTests.cs b/sdk/test/Services/S3/UnitTests/Custom/FilePartDataHandlerConcurrencyTests.cs index 3d0c41243648..819ea95b2f2a 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/FilePartDataHandlerConcurrencyTests.cs +++ b/sdk/test/Services/S3/UnitTests/Custom/FilePartDataHandlerConcurrencyTests.cs @@ -56,7 +56,7 @@ private async Task ExecuteConcurrentWriteTest( destinationPath: destinationPath); var handler = new FilePartDataHandler(config); - await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); // Determine write order (default to sequential if not specified) var order = writeOrder ?? Enumerable.Range(1, partCount).ToArray(); @@ -101,7 +101,7 @@ private async Task ExecuteVaryingSizeTest( destinationPath: destinationPath); var handler = new FilePartDataHandler(config); - await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); var totalSize = partDefinitions.Sum(p => p.Size); var order = writeOrder ?? Enumerable.Range(0, partDefinitions.Length).ToArray(); diff --git a/sdk/test/Services/S3/UnitTests/Custom/FilePartDataHandlerTests.cs b/sdk/test/Services/S3/UnitTests/Custom/FilePartDataHandlerTests.cs index 37bf03a2c179..46c2a7536bb4 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/FilePartDataHandlerTests.cs +++ b/sdk/test/Services/S3/UnitTests/Custom/FilePartDataHandlerTests.cs @@ -62,7 +62,7 @@ public async Task PrepareAsync_CreatesTempFile() var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( destinationPath: destinationPath); var handler = new FilePartDataHandler(config); - var discoveryResult = new DownloadDiscoveryResult + var discoveryResult = new DownloadResult { TotalParts = 1, ObjectSize = 1024 @@ -85,7 +85,7 @@ public async Task PrepareAsync_TempFileFollowsPattern() var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( destinationPath: destinationPath); var handler = new FilePartDataHandler(config); - var discoveryResult = new DownloadDiscoveryResult(); + var discoveryResult = new DownloadResult(); // Act await handler.PrepareAsync(discoveryResult, CancellationToken.None); @@ -102,7 +102,7 @@ public async Task PrepareAsync_ReturnsCompletedTask() var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( destinationPath: Path.Combine(_testDirectory, "test.dat")); var handler = new FilePartDataHandler(config); - var discoveryResult = new DownloadDiscoveryResult(); + var discoveryResult = new DownloadResult(); // Act var task = handler.PrepareAsync(discoveryResult, CancellationToken.None); @@ -125,7 +125,7 @@ public async Task ProcessPartAsync_WritesDataToFile() destinationPath: destinationPath); var handler = new FilePartDataHandler(config); - await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); var partData = MultipartDownloadTestHelpers.GenerateTestData(1024, 0); var response = new GetObjectResponse @@ -156,7 +156,7 @@ public async Task ProcessPartAsync_WritesAtCorrectOffset() destinationPath: destinationPath); var handler = new FilePartDataHandler(config); - await handler.PrepareAsync(new DownloadDiscoveryResult(), 
CancellationToken.None); + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); // Write part 2 (offset 1024) var part2Data = MultipartDownloadTestHelpers.GenerateTestData(1024, 1024); @@ -192,7 +192,7 @@ public async Task ProcessPartAsync_ParsesContentRangeForOffset() destinationPath: destinationPath); var handler = new FilePartDataHandler(config); - await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); var partData = MultipartDownloadTestHelpers.GenerateTestData(100, 0); var response = new GetObjectResponse @@ -225,7 +225,7 @@ public async Task ProcessPartAsync_MissingContentRange_ThrowsInvalidOperationExc destinationPath: destinationPath); var handler = new FilePartDataHandler(config); - await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); var partData = MultipartDownloadTestHelpers.GenerateTestData(100, 0); var response = new GetObjectResponse @@ -250,7 +250,7 @@ public async Task ProcessPartAsync_InvalidContentRange_ThrowsInvalidOperationExc destinationPath: destinationPath); var handler = new FilePartDataHandler(config); - await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); var partData = MultipartDownloadTestHelpers.GenerateTestData(100, 0); var response = new GetObjectResponse @@ -277,7 +277,7 @@ public async Task ProcessPartAsync_PreservesDataIntegrity() destinationPath: destinationPath); var handler = new FilePartDataHandler(config); - await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); var partData = MultipartDownloadTestHelpers.CreateMixedPattern(10240, 42); var response = new GetObjectResponse @@ -305,7 +305,7 @@ public async Task ProcessPartAsync_HandlesZeroByteResponse() destinationPath: destinationPath); var handler = new FilePartDataHandler(config); - await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); var response = new GetObjectResponse { @@ -327,7 +327,7 @@ public async Task ProcessPartAsync_HandlesSmallPart() destinationPath: destinationPath); var handler = new FilePartDataHandler(config); - await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); var partData = MultipartDownloadTestHelpers.GenerateTestData(100, 0); var response = new GetObjectResponse @@ -355,7 +355,7 @@ public async Task ProcessPartAsync_HandlesLargePart() destinationPath: destinationPath); var handler = new FilePartDataHandler(config); - await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); var partSize = 16 * 1024 * 1024; // 16MB var partData = MultipartDownloadTestHelpers.GenerateTestData(partSize, 0); @@ -384,7 +384,7 @@ public async Task ProcessPartAsync_MultipleWritesPreserveAllData() destinationPath: destinationPath); var handler = new FilePartDataHandler(config); - await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); // Write part 1 var 
part1Data = MultipartDownloadTestHelpers.GenerateTestData(1024, 0); @@ -431,7 +431,7 @@ public async Task ProcessPartAsync_SupportsConcurrentWrites() destinationPath: destinationPath); var handler = new FilePartDataHandler(config); - await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); // Create multiple parts var part1Data = MultipartDownloadTestHelpers.GenerateTestData(1024, 0); @@ -489,7 +489,7 @@ public async Task ProcessPartAsync_ConcurrentWritesDontInterfere() destinationPath: destinationPath); var handler = new FilePartDataHandler(config); - await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); // Create 10 parts with distinct patterns var tasks = new Task[10]; @@ -563,7 +563,7 @@ public async Task ProcessPartAsync_WithCancelledToken_ThrowsTaskCanceledExceptio destinationPath: destinationPath); var handler = new FilePartDataHandler(config); - await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); var partData = MultipartDownloadTestHelpers.GenerateTestData(1024, 0); var response = new GetObjectResponse @@ -657,7 +657,7 @@ public async Task OnDownloadComplete_WithSuccess_CommitsTempFile() destinationPath: destinationPath); var handler = new FilePartDataHandler(config); - await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); var partData = MultipartDownloadTestHelpers.GenerateTestData(1024, 0); var response = new GetObjectResponse @@ -689,7 +689,7 @@ public async Task OnDownloadComplete_WithSuccess_DestinationContainsAllData() destinationPath: destinationPath); var handler = new FilePartDataHandler(config); - await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); // Write 3 parts for (int i = 0; i < 3; i++) @@ -726,7 +726,7 @@ public async Task OnDownloadComplete_WithFailure_CleansTempFile() destinationPath: destinationPath); var handler = new FilePartDataHandler(config); - await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); // Act handler.OnDownloadComplete(new Exception("Download failed")); @@ -744,7 +744,7 @@ public async Task OnDownloadComplete_WithDifferentExceptions_AllHandledCorrectly var config1 = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( destinationPath: destinationPath1); var handler1 = new FilePartDataHandler(config1); - await handler1.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + await handler1.PrepareAsync(new DownloadResult(), CancellationToken.None); handler1.OnDownloadComplete(new OperationCanceledException()); Assert.IsFalse(File.Exists(destinationPath1)); @@ -753,7 +753,7 @@ public async Task OnDownloadComplete_WithDifferentExceptions_AllHandledCorrectly var config2 = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( destinationPath: destinationPath2); var handler2 = new FilePartDataHandler(config2); - await handler2.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + await handler2.PrepareAsync(new DownloadResult(), CancellationToken.None); handler2.OnDownloadComplete(new 
IOException("IO error")); Assert.IsFalse(File.Exists(destinationPath2)); } @@ -771,7 +771,7 @@ public async Task Dispose_CleansUpUncommittedFile() destinationPath: destinationPath); var handler = new FilePartDataHandler(config); - await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); // Act handler.Dispose(); @@ -790,7 +790,7 @@ public async Task Dispose_AfterCommit_DoesNotDeleteDestination() destinationPath: destinationPath); var handler = new FilePartDataHandler(config); - await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); var partData = MultipartDownloadTestHelpers.GenerateTestData(1024, 0); var response = new GetObjectResponse @@ -853,7 +853,7 @@ public async Task Integration_CompleteWorkflow_ProducesCorrectFile() var handler = new FilePartDataHandler(config); // Act - Simulate complete download workflow - await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); // Download 5 parts for (int i = 0; i < 5; i++) @@ -888,7 +888,7 @@ public async Task Integration_ParallelDownload_ProducesCorrectFile() destinationPath: destinationPath); var handler = new FilePartDataHandler(config); - await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); // Act - Download parts in parallel (reverse order to test offset handling) var tasks = new Task[5]; @@ -926,7 +926,7 @@ public async Task Integration_FailedDownload_CleansUpProperly() var handler = new FilePartDataHandler(config); // Act - await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); var partData = MultipartDownloadTestHelpers.GenerateTestData(1024, 0); var response = new GetObjectResponse @@ -955,7 +955,7 @@ public async Task Integration_LargeFileDownload_HandlesCorrectly() destinationPath: destinationPath); var handler = new FilePartDataHandler(config); - await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); // Act - Download 3 parts of 1MB each for (int i = 0; i < 3; i++) @@ -990,7 +990,7 @@ public async Task Integration_SingleByteFile_HandlesCorrectly() destinationPath: destinationPath); var handler = new FilePartDataHandler(config); - await handler.PrepareAsync(new DownloadDiscoveryResult(), CancellationToken.None); + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); // Act - Download single byte var partData = new byte[] { 0x42 }; diff --git a/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs b/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs index d39db70c38d3..f77562ac617f 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs +++ b/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs @@ -231,7 +231,7 @@ public async Task DiscoverUsingPartStrategy_WithNullPartsCount_ReturnsSinglePart var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); // Act - var result = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + var result = await 
coordinator.StartDownloadAsync(null, CancellationToken.None); // Assert Assert.IsNotNull(result); @@ -259,7 +259,7 @@ public async Task DiscoverUsingPartStrategy_WithPartsCountOne_ReturnsSinglePart( var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); // Act - var result = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + var result = await coordinator.StartDownloadAsync(null, CancellationToken.None); // Assert Assert.AreEqual(1, result.TotalParts); @@ -280,7 +280,7 @@ public async Task DiscoverUsingPartStrategy_SinglePart_DoesNotBufferFirstPart() var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); // Act - var result = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + var result = await coordinator.StartDownloadAsync(null, CancellationToken.None); // Assert - Single-part does not buffer during discovery Assert.IsNotNull(result.InitialResponse); @@ -310,7 +310,7 @@ public async Task DiscoverUsingPartStrategy_WithMultipleParts_ReturnsMultipart() var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); // Act - var result = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + var result = await coordinator.StartDownloadAsync(null, CancellationToken.None); // Assert Assert.AreEqual(5, result.TotalParts); @@ -338,7 +338,7 @@ public async Task DiscoverUsingPartStrategy_Multipart_BuffersFirstPart() var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); // Act - var result = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + var result = await coordinator.StartDownloadAsync(null, CancellationToken.None); // Assert - Multipart returns response with stream for buffering in StartDownloadsAsync Assert.IsNotNull(result.InitialResponse); @@ -360,7 +360,7 @@ public async Task DiscoverUsingPartStrategy_SavesETag() var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); // Act - var result = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + var result = await coordinator.StartDownloadAsync(null, CancellationToken.None); // Assert - ETag is saved internally (verified through subsequent validation) Assert.IsNotNull(result); @@ -389,7 +389,7 @@ public async Task DiscoverUsingPartStrategy_ParsesContentRange() var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); // Act - var result = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + var result = await coordinator.StartDownloadAsync(null, CancellationToken.None); // Assert Assert.AreEqual(totalObjectSize, result.ObjectSize); @@ -415,7 +415,7 @@ public async Task DiscoverUsingPartStrategy_WithInvalidContentRange_ThrowsExcept var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); // Act - await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + await coordinator.StartDownloadAsync(null, CancellationToken.None); } #endregion @@ -443,7 +443,7 @@ public async Task DiscoverUsingRangeStrategy_SmallObject_ReturnsSinglePart() var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); // Act - var result = await 
coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + var result = await coordinator.StartDownloadAsync(null, CancellationToken.None); // Assert Assert.AreEqual(1, result.TotalParts); @@ -477,7 +477,7 @@ public async Task DiscoverUsingRangeStrategy_SinglePartRange_ReturnsSinglePart() var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); // Act - var result = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + var result = await coordinator.StartDownloadAsync(null, CancellationToken.None); // Assert Assert.AreEqual(1, result.TotalParts); @@ -509,7 +509,7 @@ public async Task DiscoverUsingRangeStrategy_Multipart_ReturnsMultipart() var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); // Act - var result = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + var result = await coordinator.StartDownloadAsync(null, CancellationToken.None); // Assert Assert.AreEqual(7, result.TotalParts); // 52428800 / 8388608 = 6.25 -> 7 parts @@ -541,7 +541,7 @@ public async Task DiscoverUsingRangeStrategy_Multipart_ValidatesContentLength() var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); // Act - await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + await coordinator.StartDownloadAsync(null, CancellationToken.None); } [TestMethod] @@ -561,7 +561,7 @@ public async Task DiscoverUsingRangeStrategy_SavesETag() var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); // Act - var result = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + var result = await coordinator.StartDownloadAsync(null, CancellationToken.None); // Assert - ETag is saved internally Assert.IsNotNull(result); @@ -587,7 +587,7 @@ public async Task DiscoverUsingRangeStrategy_CalculatesPartCount() var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); // Act - var result = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + var result = await coordinator.StartDownloadAsync(null, CancellationToken.None); // Assert Assert.AreEqual(7, result.TotalParts); // Ceiling(52428800 / 8388608) = 7 @@ -609,25 +609,30 @@ public async Task StartDownloadsAsync_SinglePart_ReturnsImmediately() var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); // Act - Call DiscoverDownloadStrategyAsync first to properly acquire HTTP semaphore - var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); - await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); + + await coordinator.StartDownloadAsync(null, CancellationToken.None); // Assert - should complete without any additional downloads (discovery already made the call) mockClient.Verify(x => x.GetObjectAsync(It.IsAny(), It.IsAny()), Times.Once); } [TestMethod] - [ExpectedException(typeof(ArgumentNullException))] - public async Task StartDownloadsAsync_WithNullDiscoveryResult_ThrowsArgumentNullException() + public async Task StartDownloadsAsync_SinglePart_ProcessesPartSynchronously() { // Arrange - var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse(1024); + var mockClient = 
MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); // Act - await coordinator.StartDownloadsAsync(null, null, CancellationToken.None); + var result = await coordinator.StartDownloadAsync(null, CancellationToken.None); + + // Assert - should return discovery result immediately for single-part downloads + Assert.IsNotNull(result); + Assert.AreEqual(1, result.TotalParts); } #endregion @@ -647,10 +652,10 @@ public async Task Validation_Failures_ThrowInvalidOperationException( // Arrange var mockClient = MultipartDownloadTestHelpers.CreateMockClientWithValidationFailure(failureType); var coordinator = MultipartDownloadTestHelpers.CreateCoordinatorForValidationTest(mockClient.Object, failureType); - var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + // Act & Assert (exception expected via attribute) - await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); + await coordinator.StartDownloadAsync(null, CancellationToken.None); await coordinator.DownloadCompletionTask; // Wait for background task to observe exceptions } @@ -671,10 +676,10 @@ public async Task Validation_ETag_Matching_Succeeds() var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1); var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); - var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + // Act - should succeed with matching ETags - await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); + await coordinator.StartDownloadAsync(null, CancellationToken.None); // Assert - no exception thrown } @@ -713,10 +718,10 @@ public async Task Validation_ContentRange_ValidRange_Succeeds() var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1); var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); - var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + // Act - should succeed with valid ranges - await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); + await coordinator.StartDownloadAsync(null, CancellationToken.None); // Assert - no exception thrown } @@ -776,10 +781,10 @@ public async Task StartDownloadsAsync_MultipartDownload_AcquiresCapacitySequenti var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 2); var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object); - var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + // Act - await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); + await coordinator.StartDownloadAsync(null, CancellationToken.None); // Wait for background task completion await coordinator.DownloadCompletionTask; @@ -836,10 +841,10 @@ public async Task StartDownloadsAsync_MultipartDownload_DoesNotCallWaitForCapaci var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1); var 
coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object); - var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + // Act - await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); + await coordinator.StartDownloadAsync(null, CancellationToken.None); await coordinator.DownloadCompletionTask; // Assert @@ -903,10 +908,10 @@ public async Task StartDownloadsAsync_BackgroundTask_InterleavesCapacityAcquisit var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1); var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object); - var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + // Act - await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); + await coordinator.StartDownloadAsync(null, CancellationToken.None); await coordinator.DownloadCompletionTask; // Assert @@ -1023,11 +1028,11 @@ public async Task StartDownloadsAsync_PreventRaceConditionDeadlock_WithLimitedBu var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 3); var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object); - var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + // Act - This should not deadlock with the new sequential approach var startTime = DateTime.UtcNow; - await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); + await coordinator.StartDownloadAsync(null, CancellationToken.None); await coordinator.DownloadCompletionTask; var endTime = DateTime.UtcNow; @@ -1095,10 +1100,10 @@ public async Task StartDownloadsAsync_SequentialCapacityAcquisition_PreventsOutO var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 2); var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object); - var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + // Act - await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); + await coordinator.StartDownloadAsync(null, CancellationToken.None); await coordinator.DownloadCompletionTask; // Assert - Capacity acquisition should be in order, preventing blocking @@ -1137,10 +1142,10 @@ public async Task StartDownloadsAsync_BackgroundTaskSuccess_DisposesCancellation var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1); var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); - var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + // Act - await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); + await coordinator.StartDownloadAsync(null, CancellationToken.None); // Wait for background task to complete await coordinator.DownloadCompletionTask; @@ -1194,10 +1199,10 @@ public async Task StartDownloadsAsync_BackgroundTaskFailure_DisposesCancellation var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1); var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object); - var discoveryResult = await 
coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + // Act - await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); + await coordinator.StartDownloadAsync(null, CancellationToken.None); // Wait for background task to complete (with failure) try @@ -1236,7 +1241,7 @@ public async Task StartDownloadsAsync_EarlyError_DisposesCancellationTokenSource // Simulate error during PrepareAsync (before background task is created) mockDataHandler - .Setup(x => x.PrepareAsync(It.IsAny(), It.IsAny())) + .Setup(x => x.PrepareAsync(It.IsAny(), It.IsAny())) .ThrowsAsync(new InvalidOperationException("Simulated prepare failure")); var totalParts = 2; @@ -1252,12 +1257,12 @@ public async Task StartDownloadsAsync_EarlyError_DisposesCancellationTokenSource var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object); // Call DiscoverDownloadStrategyAsync first to properly acquire HTTP semaphore - var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + // Act & Assert try { - await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); + await coordinator.StartDownloadAsync(null, CancellationToken.None); Assert.Fail("Expected InvalidOperationException to be thrown"); } catch (InvalidOperationException ex) @@ -1318,10 +1323,10 @@ public async Task StartDownloadsAsync_BackgroundTaskCancellation_HandlesTokenDis var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1); var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object); - var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + // Act - await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); + await coordinator.StartDownloadAsync(null, CancellationToken.None); // Wait for background task cancellation try @@ -1372,7 +1377,7 @@ public async Task Operations_AfterDispose_ThrowObjectDisposedException() // Act coordinator.Dispose(); - await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + await coordinator.StartDownloadAsync(null, CancellationToken.None); } #endregion @@ -1396,9 +1401,29 @@ public async Task DiscoverDownloadStrategyAsync_WhenCancelled_ThrowsOperationCan cts.Cancel(); // Act - await coordinator.DiscoverDownloadStrategyAsync(cts.Token); + await coordinator.StartDownloadAsync(null, cts.Token); + } + + [TestMethod] + [ExpectedException(typeof(OperationCanceledException))] + public async Task StartDownloadAsync_SinglePart_WithPreCancelledToken_ThrowsOperationCanceledException() + { + var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse(1024); + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager( + mockClient.Object, request, config, CreateMockDataHandler().Object); + + var cts = new CancellationTokenSource(); + cts.Cancel(); + + await coordinator.StartDownloadAsync(null, cts.Token); } + [TestMethod] public async Task DiscoverDownloadStrategyAsync_WhenCancelled_SetsDownloadException() { @@ -1418,7 +1443,7 @@ public async Task DiscoverDownloadStrategyAsync_WhenCancelled_SetsDownloadExcept // Act try { - await 
coordinator.DiscoverDownloadStrategyAsync(cts.Token); + await coordinator.StartDownloadAsync(null, cts.Token); } catch (OperationCanceledException) { @@ -1447,7 +1472,7 @@ public async Task DiscoverDownloadStrategyAsync_PassesCancellationTokenToS3Clien var cts = new CancellationTokenSource(); // Act - await coordinator.DiscoverDownloadStrategyAsync(cts.Token); + await coordinator.StartDownloadAsync(null, cts.Token); // Assert Assert.AreEqual(cts.Token, capturedToken); @@ -1470,13 +1495,13 @@ public async Task StartDownloadsAsync_WhenCancelledBeforeStart_ThrowsOperationCa var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); - var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + var cts = new CancellationTokenSource(); cts.Cancel(); // Act - await coordinator.StartDownloadsAsync(discoveryResult, null, cts.Token); + await coordinator.StartDownloadAsync(null, cts.Token); await coordinator.DownloadCompletionTask; // Wait for background task to observe exceptions } @@ -1512,12 +1537,11 @@ public async Task StartDownloadsAsync_WhenCancelledDuringDownloads_NotifiesBuffe var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1); var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); - var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); // Act try { - await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); + await coordinator.StartDownloadAsync(null, CancellationToken.None); await coordinator.DownloadCompletionTask; // Wait for background task to observe exceptions } catch (OperationCanceledException) @@ -1557,12 +1581,12 @@ public async Task StartDownloadsAsync_WhenCancelled_SetsDownloadException() var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1); var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); - var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + // Act try { - await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); + await coordinator.StartDownloadAsync(null, CancellationToken.None); await coordinator.DownloadCompletionTask; // Wait for background task to observe exceptions } catch (OperationCanceledException) @@ -1591,41 +1615,17 @@ public async Task StartDownloadsAsync_PassesCancellationTokenToBufferManager() var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); - var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + var cts = new CancellationTokenSource(); // Act - await coordinator.StartDownloadsAsync(discoveryResult, null, cts.Token); + var result = await coordinator.StartDownloadAsync(null, cts.Token); // Assert - The cancellation token was passed through to the data handler - Assert.IsNotNull(discoveryResult); + Assert.IsNotNull(result); } - [TestMethod] - public async Task StartDownloadsAsync_SinglePart_DoesNotThrowOnCancellation() - { - // Arrange - Single part download should return immediately without using cancellation token - var mockResponse = 
MultipartDownloadTestHelpers.CreateSinglePartResponse(1024); - var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( - (req, ct) => Task.FromResult(mockResponse)); - - var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); - var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); - var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); - - // Call DiscoverDownloadStrategyAsync first to properly acquire HTTP semaphore - var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); - - var cts = new CancellationTokenSource(); - cts.Cancel(); - - // Act - should complete without throwing even though token is cancelled - await coordinator.StartDownloadsAsync(discoveryResult, null, cts.Token); - - // Assert - discovery already made the S3 call, StartDownloadsAsync doesn't make additional calls for single-part - mockClient.Verify(x => x.GetObjectAsync(It.IsAny(), It.IsAny()), Times.Once); - } [TestMethod] public async Task StartDownloadsAsync_CancellationPropagatesAcrossConcurrentDownloads() @@ -1667,12 +1667,12 @@ public async Task StartDownloadsAsync_CancellationPropagatesAcrossConcurrentDown var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 2); var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); - var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + // Act try { - await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); + await coordinator.StartDownloadAsync(null, CancellationToken.None); await coordinator.DownloadCompletionTask; // Wait for background task to observe exceptions } catch (OperationCanceledException) @@ -1702,7 +1702,7 @@ public async Task Coordinator_CanBeDisposedAfterCancellation() // Act try { - await coordinator.DiscoverDownloadStrategyAsync(cts.Token); + await coordinator.StartDownloadAsync(null, cts.Token); } catch (OperationCanceledException) { @@ -1746,10 +1746,10 @@ public async Task StartDownloadsAsync_RangeStrategy_CancellationDuringDownloads( var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1); var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); - var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + // Act - await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); + await coordinator.StartDownloadAsync(null, CancellationToken.None); await coordinator.DownloadCompletionTask; // Wait for background task to observe exceptions } @@ -1800,11 +1800,11 @@ public async Task StartDownloadsAsync_ReturnsImmediately_PreventsDeadlock() var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 2); var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object); - var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + // Act - StartDownloadsAsync should return immediately (not wait for all downloads) var stopwatch = System.Diagnostics.Stopwatch.StartNew(); - await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); + await coordinator.StartDownloadAsync(null, CancellationToken.None); stopwatch.Stop(); // Assert - 
StartDownloadsAsync should return almost immediately @@ -1847,11 +1847,11 @@ public async Task StartDownloadsAsync_SinglePart_ReturnsImmediatelyWithoutBackgr var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object); // Call DiscoverDownloadStrategyAsync first to properly acquire HTTP semaphore - var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + // Act var stopwatch = System.Diagnostics.Stopwatch.StartNew(); - await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); + await coordinator.StartDownloadAsync(null, CancellationToken.None); stopwatch.Stop(); // DownloadCompletionTask should be completed immediately (no background work) @@ -1867,13 +1867,12 @@ public async Task StartDownloadsAsync_SinglePart_ReturnsImmediatelyWithoutBackgr #region Capacity Checking Tests [TestMethod] - public async Task DiscoverUsingPartStrategy_CallsWaitForCapacityAsync() + public async Task Discovery_PartStrategy_CallsWaitForCapacityAsync() { - // Arrange + // Arrange - PART strategy should check capacity during discovery var capacityCallCount = 0; var mockDataHandler = new Mock(); - // Track WaitForCapacityAsync calls mockDataHandler .Setup(x => x.WaitForCapacityAsync(It.IsAny())) .Returns(() => @@ -1886,8 +1885,12 @@ public async Task DiscoverUsingPartStrategy_CallsWaitForCapacityAsync() .Setup(x => x.ProcessPartAsync(It.IsAny(), It.IsAny(), It.IsAny())) .Returns(Task.CompletedTask); + var totalObjectSize = 24 * 1024 * 1024; // 24MB -> 3 parts @ 8MB + var partSize = 8 * 1024 * 1024; // 8MB + var totalParts = 3; + var mockResponse = MultipartDownloadTestHelpers.CreateMultipartFirstPartResponse( - 8 * 1024 * 1024, 3, 24 * 1024 * 1024, "test-etag"); + partSize, totalParts, totalObjectSize, "test-etag"); var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( (req, ct) => Task.FromResult(mockResponse)); @@ -1895,28 +1898,27 @@ public async Task DiscoverUsingPartStrategy_CallsWaitForCapacityAsync() var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( downloadType: MultipartDownloadType.PART); var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); - var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object); + var coordinator = new MultipartDownloadManager( + mockClient.Object, request, config, mockDataHandler.Object); // Act - var result = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + var result = await coordinator.StartDownloadAsync(null, CancellationToken.None); // Assert - Assert.AreEqual(1, capacityCallCount, "WaitForCapacityAsync should be called exactly once during Part 1 discovery"); Assert.IsNotNull(result); Assert.AreEqual(3, result.TotalParts); - - // Verify the mock was called with correct setup - mockDataHandler.Verify(x => x.WaitForCapacityAsync(It.IsAny()), Times.Once); + Assert.AreEqual(1, capacityCallCount, + "PART strategy should call WaitForCapacityAsync during Part 1 discovery"); } + [TestMethod] - public async Task DiscoverUsingRangeStrategy_CallsWaitForCapacityAsync() + public async Task Discovery_RangeStrategy_CallsWaitForCapacityAsync() { - // Arrange + // Arrange - RANGE strategy should also check capacity during discovery var capacityCallCount = 0; var mockDataHandler = new Mock(); - // Track WaitForCapacityAsync calls mockDataHandler .Setup(x => x.WaitForCapacityAsync(It.IsAny())) .Returns(() => @@ -1929,8 +1931,9 @@ public async Task 
DiscoverUsingRangeStrategy_CallsWaitForCapacityAsync() .Setup(x => x.ProcessPartAsync(It.IsAny(), It.IsAny(), It.IsAny())) .Returns(Task.CompletedTask); - var totalObjectSize = 52428800; // 50MB - var partSize = 8388608; // 8MB + var totalObjectSize = 17 * 1024 * 1024; // 17MB -> 3 parts @ 8MB + var partSize = 8 * 1024 * 1024; // 8MB + var mockResponse = MultipartDownloadTestHelpers.CreateRangeResponse( 0, partSize - 1, totalObjectSize, "test-etag"); @@ -1941,21 +1944,19 @@ public async Task DiscoverUsingRangeStrategy_CallsWaitForCapacityAsync() partSize: partSize, downloadType: MultipartDownloadType.RANGE); var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); - var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object); + var coordinator = new MultipartDownloadManager( + mockClient.Object, request, config, mockDataHandler.Object); // Act - var result = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + var result = await coordinator.StartDownloadAsync(null, CancellationToken.None); // Assert - Assert.AreEqual(1, capacityCallCount, "WaitForCapacityAsync should be called exactly once during Part 1 discovery"); Assert.IsNotNull(result); - Assert.AreEqual(7, result.TotalParts); // 52428800 / 8388608 = 6.25 -> 7 parts - - // Verify the mock was called with correct setup - mockDataHandler.Verify(x => x.WaitForCapacityAsync(It.IsAny()), Times.Once); + Assert.AreEqual(3, result.TotalParts); // 17MB / 8MB = 3 parts (ceiling) + Assert.AreEqual(1, capacityCallCount, + "RANGE strategy should call WaitForCapacityAsync during Part 1 discovery"); } - [TestMethod] public async Task MultipleDownloads_WithSharedHttpThrottler_RespectsLimits() { @@ -1980,11 +1981,8 @@ public async Task MultipleDownloads_WithSharedHttpThrottler_RespectsLimits() var coordinator1 = new MultipartDownloadManager(mockClient1.Object, request1, config, mockDataHandler1.Object, null, sharedThrottler); var coordinator2 = new MultipartDownloadManager(mockClient2.Object, request2, config, mockDataHandler2.Object, null, sharedThrottler); - var discovery1 = await coordinator1.DiscoverDownloadStrategyAsync(CancellationToken.None); - await coordinator1.StartDownloadsAsync(discovery1, null, CancellationToken.None); - - var discovery2 = await coordinator2.DiscoverDownloadStrategyAsync(CancellationToken.None); - await coordinator2.StartDownloadsAsync(discovery2, null, CancellationToken.None); + var download1 = await coordinator1.StartDownloadAsync(null, CancellationToken.None); + var download2 = await coordinator2.StartDownloadAsync(null, CancellationToken.None); // Wait for all background work to complete await Task.WhenAll( @@ -1993,8 +1991,8 @@ await Task.WhenAll( ); // Assert - Both should complete successfully and semaphore should be fully released - Assert.IsNotNull(discovery1); - Assert.IsNotNull(discovery2); + Assert.IsNotNull(download1); + Assert.IsNotNull(download2); Assert.AreEqual(1, sharedThrottler.CurrentCount, "HTTP throttler should be fully released after complete download lifecycle"); // Cleanup @@ -2025,7 +2023,7 @@ public async Task Discovery_HttpRequestFails_ReleasesCapacityProperly() // Act & Assert try { - await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + await coordinator.StartDownloadAsync(null, CancellationToken.None); Assert.Fail("Expected InvalidOperationException to be thrown"); } catch (InvalidOperationException ex) @@ -2068,7 +2066,7 @@ public async Task 
Discovery_CancellationDuringCapacityWait_ReleasesHttpSlotPrope // Act & Assert try { - await coordinator.DiscoverDownloadStrategyAsync(cts.Token); + await coordinator.StartDownloadAsync(null, cts.Token); Assert.Fail("Expected OperationCanceledException to be thrown"); } catch (OperationCanceledException) @@ -2117,7 +2115,7 @@ public async Task Discovery_CancellationAfterCapacityButBeforeHttp_ReleasesHttpS try { cts.Cancel(); // Cancel before discovery - await coordinator.DiscoverDownloadStrategyAsync(cts.Token); + await coordinator.StartDownloadAsync(null, cts.Token); Assert.Fail("Expected OperationCanceledException to be thrown"); } catch (OperationCanceledException) @@ -2161,7 +2159,7 @@ public async Task Discovery_SinglePart_StillCallsCapacityCheck() var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object); // Act - var result = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + var result = await coordinator.StartDownloadAsync(null, CancellationToken.None); // Assert Assert.IsNotNull(result); @@ -2170,6 +2168,7 @@ public async Task Discovery_SinglePart_StillCallsCapacityCheck() "Even single-part downloads should call WaitForCapacityAsync during discovery"); } + #endregion #region Concurrency Control Tests @@ -2231,10 +2230,10 @@ public async Task HttpSemaphore_HeldThroughProcessPartAsync() var coordinator = new MultipartDownloadManager( mockClient.Object, request, config, mockDataHandler.Object, null, httpSemaphore); - var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + // Act - var startTask = coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); + var startTask = coordinator.StartDownloadAsync(null, CancellationToken.None); // Wait for Part 1 to enter ProcessPartAsync await part1EnteredProcessPart.Task; @@ -2306,10 +2305,10 @@ public async Task HttpSemaphore_RangeStrategy_HeldThroughProcessPartAsync() var coordinator = new MultipartDownloadManager( mockClient.Object, request, config, mockDataHandler.Object, null, httpSemaphore); - var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + // Act - var startTask = coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); + var startTask = coordinator.StartDownloadAsync(null, CancellationToken.None); await part1EnteredProcessPart.Task; // Check semaphore state while Part 1 is in ProcessPartAsync @@ -2332,136 +2331,105 @@ public async Task HttpSemaphore_RangeStrategy_HeldThroughProcessPartAsync() #region Semaphore Release Error Path Tests [TestMethod] - public async Task StartDownloadsAsync_PrepareAsyncFails_ReleasesHttpSemaphore() + public async Task Discovery_WaitForCapacityFails_DoesNotReleaseHttpSemaphore() { - // Arrange - PrepareAsync fails but semaphore was acquired during discovery + // Arrange - Test that semaphore is NOT released when it was never acquired var httpThrottler = new SemaphoreSlim(2, 2); var initialCount = httpThrottler.CurrentCount; var mockDataHandler = new Mock(); - // WaitForCapacityAsync succeeds (buffer space available) + // WaitForCapacityAsync fails BEFORE HTTP semaphore is acquired mockDataHandler .Setup(x => x.WaitForCapacityAsync(It.IsAny())) - .Returns(Task.CompletedTask); - - // PrepareAsync fails BEFORE Part 1 processing - mockDataHandler - .Setup(x => x.PrepareAsync(It.IsAny(), It.IsAny())) - .ThrowsAsync(new InvalidOperationException("Simulated prepare failure")); - - var mockResponse = 
MultipartDownloadTestHelpers.CreateMultipartFirstPartResponse( - 8 * 1024 * 1024, 2, 16 * 1024 * 1024, "test-etag"); - - var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( - (req, ct) => Task.FromResult(mockResponse)); + .ThrowsAsync(new InvalidOperationException("Simulated capacity wait failure")); - var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( - downloadType: MultipartDownloadType.PART); + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); var coordinator = new MultipartDownloadManager( mockClient.Object, request, config, mockDataHandler.Object, null, httpThrottler); - - var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); - - // After discovery, semaphore should have 1 slot held (2 total - 1 used = 1 available) - Assert.AreEqual(initialCount - 1, httpThrottler.CurrentCount, - "After discovery, semaphore should have 1 slot held"); // Act & Assert try { - await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); + await coordinator.StartDownloadAsync(null, CancellationToken.None); Assert.Fail("Expected InvalidOperationException to be thrown"); } catch (InvalidOperationException ex) { - Assert.AreEqual("Simulated prepare failure", ex.Message); + Assert.AreEqual("Simulated capacity wait failure", ex.Message); } + // Assert - Semaphore should NOT be released (it was never acquired) Assert.AreEqual(initialCount, httpThrottler.CurrentCount, - "HTTP semaphore should be released when PrepareAsync fails"); + "HTTP semaphore should NOT be released when it was never acquired (failed before WaitAsync)"); // Cleanup httpThrottler.Dispose(); } + + [TestMethod] - public async Task StartDownloadsAsync_Part1ProcessingFails_ReleasesHttpSemaphore() + public async Task StartDownloadAsync_WaitForCapacityFails_DoesNotReleaseHttpSemaphore() { - // Arrange - Test that finally block correctly releases semaphore when Part 1 processing fails + // Arrange - Test that semaphore is NOT released when it was never acquired var httpThrottler = new SemaphoreSlim(2, 2); var initialCount = httpThrottler.CurrentCount; var mockDataHandler = new Mock(); - // WaitForCapacityAsync succeeds + // WaitForCapacityAsync fails BEFORE HTTP semaphore is acquired mockDataHandler .Setup(x => x.WaitForCapacityAsync(It.IsAny())) - .Returns(Task.CompletedTask); - - // PrepareAsync succeeds - mockDataHandler - .Setup(x => x.PrepareAsync(It.IsAny(), It.IsAny())) - .Returns(Task.CompletedTask); - - // ProcessPartAsync fails for Part 1 - mockDataHandler - .Setup(x => x.ProcessPartAsync(1, It.IsAny(), It.IsAny())) - .ThrowsAsync(new InvalidOperationException("Simulated Part 1 processing failure")); - - var mockResponse = MultipartDownloadTestHelpers.CreateMultipartFirstPartResponse( - 8 * 1024 * 1024, 2, 16 * 1024 * 1024, "test-etag"); - - var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( - (req, ct) => Task.FromResult(mockResponse)); + .ThrowsAsync(new InvalidOperationException("Simulated capacity wait failure")); - var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( - downloadType: MultipartDownloadType.PART); + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); var 
coordinator = new MultipartDownloadManager( mockClient.Object, request, config, mockDataHandler.Object, null, httpThrottler); - - var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); - - // After discovery, semaphore should have 1 slot held - Assert.AreEqual(initialCount - 1, httpThrottler.CurrentCount, - "After discovery, semaphore should have 1 slot held"); // Act & Assert try { - await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); + await coordinator.StartDownloadAsync(null, CancellationToken.None); Assert.Fail("Expected InvalidOperationException to be thrown"); } catch (InvalidOperationException ex) { - Assert.AreEqual("Simulated Part 1 processing failure", ex.Message); + Assert.AreEqual("Simulated capacity wait failure", ex.Message); } - // Assert - Finally block should release semaphore + // Assert - Semaphore should NOT be released (it was never acquired) Assert.AreEqual(initialCount, httpThrottler.CurrentCount, - "HTTP semaphore should be released by finally block when Part 1 processing fails"); + "HTTP semaphore should NOT be released when it was never acquired (failed before WaitAsync)"); // Cleanup httpThrottler.Dispose(); } [TestMethod] - public async Task Discovery_WaitForCapacityFails_DoesNotReleaseHttpSemaphore() + public async Task Discovery_HttpRequestAfterCapacityFails_ReleasesHttpSemaphore() { - // Arrange - Test that semaphore is NOT released when it was never acquired + // Arrange - Test semaphore release when HTTP request fails after capacity is acquired var httpThrottler = new SemaphoreSlim(2, 2); var initialCount = httpThrottler.CurrentCount; var mockDataHandler = new Mock(); - // WaitForCapacityAsync fails BEFORE HTTP semaphore is acquired + // WaitForCapacityAsync succeeds (capacity acquired) mockDataHandler .Setup(x => x.WaitForCapacityAsync(It.IsAny())) - .ThrowsAsync(new InvalidOperationException("Simulated capacity wait failure")); + .Returns(Task.CompletedTask); + + // HTTP request fails AFTER both capacity types are acquired + var mockClient = new Mock(); + mockClient + .Setup(x => x.GetObjectAsync(It.IsAny(), It.IsAny())) + .ThrowsAsync(new InvalidOperationException("Simulated S3 failure after capacity acquired")); - var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); var coordinator = new MultipartDownloadManager( @@ -2470,74 +2438,111 @@ public async Task Discovery_WaitForCapacityFails_DoesNotReleaseHttpSemaphore() // Act & Assert try { - await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + await coordinator.StartDownloadAsync(null, CancellationToken.None); Assert.Fail("Expected InvalidOperationException to be thrown"); } catch (InvalidOperationException ex) { - Assert.AreEqual("Simulated capacity wait failure", ex.Message); + Assert.AreEqual("Simulated S3 failure after capacity acquired", ex.Message); } - // Assert - Semaphore should NOT be released (it was never acquired) + // Assert - HTTP semaphore should be released by catch block in discovery Assert.AreEqual(initialCount, httpThrottler.CurrentCount, - "HTTP semaphore should NOT be released when it was never acquired (failed before WaitAsync)"); + "HTTP semaphore should be released when HTTP request fails in discovery"); // Cleanup httpThrottler.Dispose(); } [TestMethod] - public async Task 
StartDownloadsAsync_BackgroundPartHttpFails_ReleasesHttpSemaphore() + public async Task StartDownloadAsync_PrepareAsyncFails_ReleasesHttpSemaphore() { - // Arrange - Test that background part download failures properly release semaphore - var totalParts = 3; - var partSize = 8 * 1024 * 1024; - var totalObjectSize = totalParts * partSize; - + // Arrange - Test that HTTP semaphore is released when PrepareAsync fails after discovery var httpThrottler = new SemaphoreSlim(2, 2); var initialCount = httpThrottler.CurrentCount; var mockDataHandler = new Mock(); - // WaitForCapacityAsync succeeds for all parts + // WaitForCapacityAsync succeeds mockDataHandler .Setup(x => x.WaitForCapacityAsync(It.IsAny())) .Returns(Task.CompletedTask); - // PrepareAsync succeeds + // ProcessPartAsync succeeds for Part 1 (discovery) mockDataHandler - .Setup(x => x.PrepareAsync(It.IsAny(), It.IsAny())) + .Setup(x => x.ProcessPartAsync(1, It.IsAny(), It.IsAny())) .Returns(Task.CompletedTask); - // ProcessPartAsync succeeds for Part 1, but not called for Part 2 (HTTP fails first) + // PrepareAsync FAILS (this happens after Part 1 processing in StartDownloadAsync) mockDataHandler - .Setup(x => x.ProcessPartAsync(1, It.IsAny(), It.IsAny())) - .Returns(Task.CompletedTask); + .Setup(x => x.PrepareAsync(It.IsAny(), It.IsAny())) + .ThrowsAsync(new InvalidOperationException("Simulated prepare failure")); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( + 2, 8 * 1024 * 1024, 16 * 1024 * 1024, "test-etag", usePartStrategy: true); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: MultipartDownloadType.PART); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager( + mockClient.Object, request, config, mockDataHandler.Object, null, httpThrottler); + + // Act & Assert + try + { + await coordinator.StartDownloadAsync(null, CancellationToken.None); + Assert.Fail("Expected InvalidOperationException to be thrown"); + } + catch (InvalidOperationException ex) + { + Assert.AreEqual("Simulated prepare failure", ex.Message); + } + + // Assert - HTTP semaphore should be released even when PrepareAsync fails + Assert.AreEqual(initialCount, httpThrottler.CurrentCount, + "HTTP semaphore should be released when PrepareAsync fails"); + + // Cleanup + httpThrottler.Dispose(); + } + + [TestMethod] + public async Task StartDownloadAsync_BackgroundPartHttpFails_ReleasesHttpSemaphore() + { + // Arrange - Test that HTTP semaphore is released when background part HTTP request fails + var httpThrottler = new SemaphoreSlim(2, 2); + var initialCount = httpThrottler.CurrentCount; + + var mockDataHandler = new Mock(); - // ReleaseCapacity is called on failure + // Capacity checks succeed mockDataHandler - .Setup(x => x.ReleaseCapacity()); + .Setup(x => x.WaitForCapacityAsync(It.IsAny())) + .Returns(Task.CompletedTask); + // Part 1 processing succeeds mockDataHandler - .Setup(x => x.OnDownloadComplete(It.IsAny())); + .Setup(x => x.ProcessPartAsync(1, It.IsAny(), It.IsAny())) + .Returns(Task.CompletedTask); + + mockDataHandler.Setup(x => x.OnDownloadComplete(It.IsAny())); + // HTTP client: Part 1 succeeds, Part 2 HTTP request FAILS var callCount = 0; var mockClient = new Mock(); - mockClient.Setup(x => x.GetObjectAsync(It.IsAny(), It.IsAny())) + mockClient + .Setup(x => x.GetObjectAsync(It.IsAny(), It.IsAny())) .Returns(() => { callCount++; if (callCount == 1) { - // Discovery call succeeds + // Part 1 discovery 
succeeds return Task.FromResult(MultipartDownloadTestHelpers.CreateMultipartFirstPartResponse( - partSize, totalParts, totalObjectSize, "test-etag")); - } - else - { - // Background part HTTP request fails - throw new InvalidOperationException("Simulated HTTP failure for background part"); + 8 * 1024 * 1024, 2, 16 * 1024 * 1024, "test-etag")); } + // Part 2 HTTP request fails + throw new AmazonS3Exception("Simulated S3 HTTP failure"); }); var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( @@ -2545,164 +2550,128 @@ public async Task StartDownloadsAsync_BackgroundPartHttpFails_ReleasesHttpSemaph var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1); var coordinator = new MultipartDownloadManager( mockClient.Object, request, config, mockDataHandler.Object, null, httpThrottler); - - var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); - - // After discovery, semaphore should have 1 slot held (for Part 1) - Assert.AreEqual(initialCount - 1, httpThrottler.CurrentCount, - "After discovery, semaphore should have 1 slot held"); // Act - await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); + await coordinator.StartDownloadAsync(null, CancellationToken.None); - // Wait for background task to fail + // Wait for background task to complete with failure try { await coordinator.DownloadCompletionTask; } - catch (InvalidOperationException) + catch (AmazonS3Exception) { - // Expected failure from background task + // Expected } - // Assert - Semaphore should be fully released (Part 1 released in StartDownloadsAsync, - // Parts 2 and 3 released in CreateDownloadTaskAsync catch blocks) - Assert.AreEqual(initialCount, httpThrottler.CurrentCount, - "HTTP semaphore should be fully released after background part HTTP failure"); - - // Verify ReleaseCapacity was called twice (once for Part 2 that failed, once for Part 3 that got cancelled) - // With sequential capacity acquisition, Part 3 acquired capacity before Part 2's HTTP call failed - mockDataHandler.Verify(x => x.ReleaseCapacity(), Times.Exactly(2), - "ReleaseCapacity should be called for both Part 2 (failed) and Part 3 (cancelled after acquiring capacity)"); + // Assert - HTTP semaphore should be fully released after background failure + Assert.AreEqual(initialCount, httpThrottler.CurrentCount, + "HTTP semaphore should be released when background part HTTP request fails"); // Cleanup httpThrottler.Dispose(); } [TestMethod] - public async Task StartDownloadsAsync_BackgroundPartProcessingFails_ReleasesHttpSemaphore() + public async Task StartDownloadAsync_Part1ProcessingFails_ReleasesHttpSemaphore() { - // Arrange - Test that background part ProcessPartAsync failures properly release semaphore - var totalParts = 3; - var partSize = 8 * 1024 * 1024; - var totalObjectSize = totalParts * partSize; - + // Arrange - Test that HTTP semaphore is released when Part 1 processing fails during discovery var httpThrottler = new SemaphoreSlim(2, 2); var initialCount = httpThrottler.CurrentCount; var mockDataHandler = new Mock(); - // WaitForCapacityAsync succeeds for all parts + // WaitForCapacityAsync succeeds mockDataHandler .Setup(x => x.WaitForCapacityAsync(It.IsAny())) .Returns(Task.CompletedTask); - // PrepareAsync succeeds - mockDataHandler - .Setup(x => x.PrepareAsync(It.IsAny(), It.IsAny())) - .Returns(Task.CompletedTask); - - // ProcessPartAsync succeeds for Part 1, fails for Part 2 - var processCallCount = 0; - 
mockDataHandler - .Setup(x => x.ProcessPartAsync(It.IsAny(), It.IsAny(), It.IsAny())) - .Returns((partNum, response, ct) => - { - processCallCount++; - if (partNum == 1) - { - return Task.CompletedTask; // Part 1 succeeds - } - throw new InvalidOperationException($"Simulated processing failure for Part {partNum}"); - }); - - // ReleaseCapacity is called on failure - mockDataHandler - .Setup(x => x.ReleaseCapacity()); - + // Part 1 ProcessPartAsync FAILS (during discovery phase of StartDownloadAsync) mockDataHandler - .Setup(x => x.OnDownloadComplete(It.IsAny())); + .Setup(x => x.ProcessPartAsync(1, It.IsAny(), It.IsAny())) + .ThrowsAsync(new InvalidOperationException("Simulated Part 1 processing failure")); var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( - totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true); + 2, 8 * 1024 * 1024, 16 * 1024 * 1024, "test-etag", usePartStrategy: true); var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( downloadType: MultipartDownloadType.PART); - var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); var coordinator = new MultipartDownloadManager( mockClient.Object, request, config, mockDataHandler.Object, null, httpThrottler); - - var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); - - // After discovery, semaphore should have 1 slot held - Assert.AreEqual(initialCount - 1, httpThrottler.CurrentCount, - "After discovery, semaphore should have 1 slot held"); - // Act - await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); - - // Wait for background task to fail + // Act & Assert try { - await coordinator.DownloadCompletionTask; + await coordinator.StartDownloadAsync(null, CancellationToken.None); + Assert.Fail("Expected InvalidOperationException to be thrown"); } - catch (InvalidOperationException) + catch (InvalidOperationException ex) { - // Expected failure from background task + Assert.AreEqual("Simulated Part 1 processing failure", ex.Message); } - // Assert - Semaphore should be fully released - Assert.AreEqual(initialCount, httpThrottler.CurrentCount, - "HTTP semaphore should be fully released after background part processing failure"); - - // Verify ReleaseCapacity was called twice (once for Part 2 that failed, once for Part 3 that may have continued) - // With sequential capacity acquisition, Part 3 acquired capacity before Part 2's processing failed - mockDataHandler.Verify(x => x.ReleaseCapacity(), Times.Exactly(2), - "ReleaseCapacity should be called for both Part 2 (failed) and Part 3 (cancelled/failed after acquiring capacity)"); + // Assert - HTTP semaphore should be released when Part 1 processing fails + Assert.AreEqual(initialCount, httpThrottler.CurrentCount, + "HTTP semaphore should be released when Part 1 processing fails during discovery"); // Cleanup httpThrottler.Dispose(); } [TestMethod] - public async Task Discovery_HttpRequestAfterCapacityFails_ReleasesHttpSemaphore() + public async Task StartDownloadAsync_BackgroundPartProcessingFails_ReleasesHttpSemaphore() { - // Arrange - Test semaphore release when HTTP request fails after capacity is acquired + // Arrange - Test that HTTP semaphore is released when background part processing fails var httpThrottler = new SemaphoreSlim(2, 2); var initialCount = httpThrottler.CurrentCount; var mockDataHandler = new Mock(); - 
// WaitForCapacityAsync succeeds (capacity acquired) + // Capacity checks succeed mockDataHandler .Setup(x => x.WaitForCapacityAsync(It.IsAny())) .Returns(Task.CompletedTask); - // HTTP request fails AFTER both capacity types are acquired - var mockClient = new Mock(); - mockClient - .Setup(x => x.GetObjectAsync(It.IsAny(), It.IsAny())) - .ThrowsAsync(new InvalidOperationException("Simulated S3 failure after capacity acquired")); + // Part 1 processing succeeds, Part 2 processing FAILS + mockDataHandler + .Setup(x => x.ProcessPartAsync(It.IsAny(), It.IsAny(), It.IsAny())) + .Returns((partNum, response, ct) => + { + if (partNum == 1) + { + return Task.CompletedTask; // Part 1 succeeds + } + throw new InvalidOperationException("Simulated Part 2 processing failure"); + }); - var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); - var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + mockDataHandler.Setup(x => x.OnDownloadComplete(It.IsAny())); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( + 2, 8 * 1024 * 1024, 16 * 1024 * 1024, "test-etag", usePartStrategy: true); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: MultipartDownloadType.PART); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1); var coordinator = new MultipartDownloadManager( mockClient.Object, request, config, mockDataHandler.Object, null, httpThrottler); - // Act & Assert + // Act + await coordinator.StartDownloadAsync(null, CancellationToken.None); + + // Wait for background task to complete with failure try { - await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); - Assert.Fail("Expected InvalidOperationException to be thrown"); + await coordinator.DownloadCompletionTask; } - catch (InvalidOperationException ex) + catch (InvalidOperationException) { - Assert.AreEqual("Simulated S3 failure after capacity acquired", ex.Message); + // Expected } - // Assert - HTTP semaphore should be released by catch block in discovery - Assert.AreEqual(initialCount, httpThrottler.CurrentCount, - "HTTP semaphore should be released when HTTP request fails in discovery"); + // Assert - HTTP semaphore should be fully released after background failure + Assert.AreEqual(initialCount, httpThrottler.CurrentCount, + "HTTP semaphore should be released when background part processing fails"); // Cleanup httpThrottler.Dispose(); @@ -3253,10 +3222,10 @@ public async Task ProgressCallback_ConcurrentCompletion_FiresOnlyOneCompletionEv concurrentRequests: 3); // Allow all parts to complete simultaneously var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); - var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + // Act - await coordinator.StartDownloadsAsync(discoveryResult, progressCallback, CancellationToken.None); + await coordinator.StartDownloadAsync(progressCallback, CancellationToken.None); // Wait for async progress events to complete var success = await WaitForProgressEventsAsync(progressEvents, progressLock, totalObjectSize); @@ -3309,10 +3278,10 @@ public async Task ProgressCallback_MultiplePartsComplete_AggregatesCorrectly() var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1); var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); - var 
discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + // Act - await coordinator.StartDownloadsAsync(discoveryResult, progressCallback, CancellationToken.None); + await coordinator.StartDownloadAsync(progressCallback, CancellationToken.None); // Wait for async progress events to complete var success = await WaitForProgressEventsAsync(progressEvents, progressLock, totalObjectSize); @@ -3370,7 +3339,7 @@ public async Task StartDownloadsAsync_BackgroundPartFails_CancelsInternalToken() // PrepareAsync succeeds mockDataHandler - .Setup(x => x.PrepareAsync(It.IsAny(), It.IsAny())) + .Setup(x => x.PrepareAsync(It.IsAny(), It.IsAny())) .Returns(Task.CompletedTask); // ProcessPartAsync: Controlled execution order using TaskCompletionSource @@ -3417,10 +3386,10 @@ public async Task StartDownloadsAsync_BackgroundPartFails_CancelsInternalToken() var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 2); var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object); - var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + // Act - Start downloads - await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); + await coordinator.StartDownloadAsync(null, CancellationToken.None); // Wait for Part 3 to reach synchronization point await part3ReachedSyncPoint.Task; @@ -3471,7 +3440,7 @@ public async Task StartDownloadsAsync_MultiplePartsFail_HandlesGracefully() .Returns(Task.CompletedTask); mockDataHandler - .Setup(x => x.PrepareAsync(It.IsAny(), It.IsAny())) + .Setup(x => x.PrepareAsync(It.IsAny(), It.IsAny())) .Returns(Task.CompletedTask); // Part 1 succeeds, Parts 2, 3, 4 all fail @@ -3499,10 +3468,10 @@ public async Task StartDownloadsAsync_MultiplePartsFail_HandlesGracefully() var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 3); var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object); - var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + // Act - await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); + await coordinator.StartDownloadAsync(null, CancellationToken.None); try { @@ -3534,7 +3503,7 @@ public async Task StartDownloadsAsync_CancellationRacesWithDispose_HandlesGracef .Returns(Task.CompletedTask); mockDataHandler - .Setup(x => x.PrepareAsync(It.IsAny(), It.IsAny())) + .Setup(x => x.PrepareAsync(It.IsAny(), It.IsAny())) .Returns(Task.CompletedTask); // Part 1 succeeds, Part 2 fails triggering cancellation @@ -3572,10 +3541,10 @@ public async Task StartDownloadsAsync_CancellationRacesWithDispose_HandlesGracef var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 2); var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object); - var discoveryResult = await coordinator.DiscoverDownloadStrategyAsync(CancellationToken.None); + // Act - await coordinator.StartDownloadsAsync(discoveryResult, null, CancellationToken.None); + await coordinator.StartDownloadAsync(null, CancellationToken.None); try { From 9ade4e113391817e9644028b70c969edd43c6a58 Mon Sep 17 00:00:00 2001 From: Garrett Beatty Date: Tue, 16 Dec 2025 09:41:40 -0500 Subject: [PATCH 56/56] queue fixes (#4228) --- .../Transfer/Internal/IDownloadManager.cs | 5 - 
.../Internal/MultipartDownloadManager.cs | 43 +- .../Custom/Transfer/Internal/TaskHelpers.cs | 48 +- .../Custom/MultipartDownloadManagerTests.cs | 767 ++++++++++++++++-- 4 files changed, 759 insertions(+), 104 deletions(-) diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/IDownloadManager.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/IDownloadManager.cs index 662076ded6da..3c54bfcd8b00 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/IDownloadManager.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/IDownloadManager.cs @@ -55,11 +55,6 @@ internal interface IDownloadManager : IDisposable /// with the previous two-method API. /// Task StartDownloadAsync(EventHandler progressCallback, CancellationToken cancellationToken); - - /// - /// Exception that occurred during downloads, if any. - /// - Exception DownloadException { get; } } /// diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs index ef50b5f4a7bf..a961fa0141f8 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs @@ -46,8 +46,6 @@ internal class MultipartDownloadManager : IDownloadManager private readonly SemaphoreSlim _httpConcurrencySlots; private readonly bool _ownsHttpThrottler; private readonly RequestEventHandler _requestEventHandler; - - private Exception _downloadException; private bool _disposed = false; private bool _discoveryCompleted = false; private Task _downloadCompletionTask; @@ -166,15 +164,6 @@ public MultipartDownloadManager(IAmazonS3 s3Client, BaseDownloadRequest request, } } - /// - public Exception DownloadException - { - get - { - return _downloadException; - } - } - /// /// Discovers the download strategy and starts concurrent downloads in a single unified operation. /// This eliminates resource leakage by managing HTTP slots and buffer capacity internally. 
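With DownloadException removed from both the interface and the manager, the unified call documented above is also the only error channel: failures now surface from the returned task and from DownloadCompletionTask. A minimal consumption sketch under assumptions from this diff (MultipartDownloadManager, DownloadResult, and the StartDownloadAsync parameter names are taken from the surrounding hunks; the helper name RunDownloadAsync and the Task<DownloadResult> return shape are illustrative, not part of the patch):

    // Sketch only - not part of this patch.
    internal static async Task<DownloadResult> RunDownloadAsync(
        MultipartDownloadManager manager, CancellationToken token)
    {
        // One call now performs discovery, processes Part 1, and queues the
        // remaining part downloads before returning.
        DownloadResult result = await manager.StartDownloadAsync(
            progressCallback: null, cancellationToken: token);

        // Awaiting the completion task replaces polling DownloadException:
        // the first faulted part rethrows its original exception here.
        await manager.DownloadCompletionTask;

        return result;
    }

Callers that previously inspected DownloadException after a failed run should instead catch whatever escapes the await of DownloadCompletionTask.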
@@ -259,7 +248,6 @@ private async Task PerformDiscoveryAsync(CancellationToken cance } catch (Exception ex) { - _downloadException = ex; _logger.Error(ex, "MultipartDownloadManager: Discovery failed"); throw; } @@ -336,7 +324,6 @@ private async Task PerformDownloadsAsync(DownloadResult downloadResult, EventHan } catch (Exception ex) { - _downloadException = ex; _logger.Error(ex, "MultipartDownloadManager: Download failed"); HandleDownloadError(ex, internalCts); @@ -414,7 +401,7 @@ private async Task StartBackgroundDownloadsAsync(DownloadResult downloadResult, _logger.DebugFormat("MultipartDownloadManager: Background task waiting for {0} download tasks", expectedTaskCount); // Wait for all downloads to complete (fails fast on first exception) - await TaskHelpers.WhenAllOrFirstExceptionAsync(downloadTasks, internalCts.Token).ConfigureAwait(false); + await TaskHelpers.WhenAllFailFastAsync(downloadTasks, internalCts.Token).ConfigureAwait(false); _logger.DebugFormat("MultipartDownloadManager: All download tasks completed successfully"); @@ -429,7 +416,6 @@ private async Task StartBackgroundDownloadsAsync(DownloadResult downloadResult, #pragma warning disable CA1031 // Do not catch general exception types catch (Exception ex) { - _downloadException = ex; HandleDownloadError(ex, internalCts); throw; } @@ -451,13 +437,21 @@ private async Task CreateDownloadTasksAsync(DownloadResult downloadResult, Event // Pre-acquire capacity in sequential order to prevent race condition deadlock // This ensures Part 2 gets capacity before Part 3, etc., preventing out-of-order // parts from consuming all buffer slots and blocking the next expected part - for (int partNum = 2; partNum <= downloadResult.TotalParts; partNum++) + for (int partNum = 2; partNum <= downloadResult.TotalParts && !internalCts.IsCancellationRequested; partNum++) { _logger.DebugFormat("MultipartDownloadManager: [Part {0}] Waiting for buffer space", partNum); // Acquire capacity sequentially - guarantees Part 2 before Part 3, etc. 
await _dataHandler.WaitForCapacityAsync(internalCts.Token).ConfigureAwait(false); + // Check cancellation after acquiring capacity - a task may have failed while waiting + if (internalCts.IsCancellationRequested) + { + _logger.InfoFormat("MultipartDownloadManager: [Part {0}] Stopping early - cancellation requested after capacity acquired", partNum); + _dataHandler.ReleaseCapacity(); + break; + } + _logger.DebugFormat("MultipartDownloadManager: [Part {0}] Buffer space acquired", partNum); _logger.DebugFormat("MultipartDownloadManager: [Part {0}] Waiting for HTTP concurrency slot (Available: {1}/{2})", @@ -466,6 +460,15 @@ private async Task CreateDownloadTasksAsync(DownloadResult downloadResult, Event // Acquire HTTP slot in the loop before creating task // Loop will block here if all slots are in use await _httpConcurrencySlots.WaitAsync(internalCts.Token).ConfigureAwait(false); + + // Check cancellation after acquiring HTTP slot - a task may have failed while waiting + if (internalCts.IsCancellationRequested) + { + _logger.InfoFormat("MultipartDownloadManager: [Part {0}] Stopping early - cancellation requested after HTTP slot acquired", partNum); + _httpConcurrencySlots.Release(); + _dataHandler.ReleaseCapacity(); + break; + } _logger.DebugFormat("MultipartDownloadManager: [Part {0}] HTTP concurrency slot acquired", partNum); @@ -478,10 +481,16 @@ private async Task CreateDownloadTasksAsync(DownloadResult downloadResult, Event { // If task creation fails, release the HTTP slot we just acquired _httpConcurrencySlots.Release(); + _dataHandler.ReleaseCapacity(); _logger.DebugFormat("MultipartDownloadManager: [Part {0}] HTTP concurrency slot released due to task creation failure: {1}", partNum, ex); throw; } } + + if (internalCts.IsCancellationRequested && downloadTasks.Count < downloadResult.TotalParts - 1) + { + _logger.InfoFormat("MultipartDownloadManager: Stopped queuing early at {0} parts due to cancellation", downloadTasks.Count); + } } /// @@ -491,7 +500,7 @@ private void ValidateDownloadCompletion(int expectedTaskCount, int totalParts) { // SEP Part GET Step 6 / Ranged GET Step 8: // "validate that the total number of part GET requests sent matches with the expected PartsCount" - // Note: This should always be true if we reach this point, since WhenAllOrFirstException + // Note: This should always be true if we reach this point, since WhenAllFailFastAsync // ensures all tasks completed successfully (or threw on first failure). // The check serves as a defensive assertion for SEP compliance. // Note: expectedTaskCount + 1 accounts for Part 1 being buffered during discovery diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/TaskHelpers.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/TaskHelpers.cs index 4ca8db0c4fea..acc3dce55b6f 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/TaskHelpers.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/TaskHelpers.cs @@ -27,10 +27,52 @@ namespace Amazon.S3.Transfer.Internal /// internal static class TaskHelpers { + /// + /// Waits for all tasks to complete, failing fast on the first exception. + /// When any task faults, its exception is immediately propagated without waiting for other tasks. + /// + /// List of tasks to wait for completion. This list is not modified. 
+        /// Cancellation token to observe (not actively checked - caller handles cancellation)
+        /// A task that represents the completion of all tasks or throws on first exception
+        ///
+        /// This method creates an internal copy of the task list for tracking purposes,
+        /// so the caller's list remains unchanged after this method completes.
+        /// The caller is responsible for cancelling remaining tasks when this method throws.
+        ///
+        internal static async Task WhenAllFailFastAsync(List<Task> pendingTasks, CancellationToken cancellationToken)
+        {
+            var remaining = new HashSet<Task>(pendingTasks);
+            int total = remaining.Count;
+            int processed = 0;
+
+            Logger.GetLogger(typeof(TaskHelpers)).DebugFormat("TaskHelpers.WhenAllFailFastAsync: Starting with TotalTasks={0}", total);
+
+            while (remaining.Count > 0)
+            {
+                // Wait for any task to complete
+                var completedTask = await Task.WhenAny(remaining)
+                    .ConfigureAwait(continueOnCapturedContext: false);
+
+                // Process the completed task - will throw if faulted
+                // The caller's catch block handles cancellation AFTER this exception propagates,
+                // which ensures the original exception is always thrown (not OperationCanceledException)
+                await completedTask
+                    .ConfigureAwait(continueOnCapturedContext: false);
+
+                remaining.Remove(completedTask);
+                processed++;
+
+                Logger.GetLogger(typeof(TaskHelpers)).DebugFormat("TaskHelpers.WhenAllFailFastAsync: Task completed (Processed={0}/{1}, Remaining={2})",
+                    processed, total, remaining.Count);
+            }
+
+            Logger.GetLogger(typeof(TaskHelpers)).DebugFormat("TaskHelpers.WhenAllFailFastAsync: All tasks completed (Total={0})", total);
+        }
+
         ///
         /// Waits for all tasks to complete or till any task fails or is canceled.
         ///
-        /// List of tasks to wait for completion
+        /// List of tasks to wait for completion. Note: This list is mutated during processing. 
/// Cancellation token to observe /// A task that represents the completion of all tasks or the first exception internal static async Task WhenAllOrFirstExceptionAsync(List pendingTasks, CancellationToken cancellationToken) @@ -47,8 +89,8 @@ internal static async Task WhenAllOrFirstExceptionAsync(List pendingTasks, var completedTask = await Task.WhenAny(pendingTasks) .ConfigureAwait(continueOnCapturedContext: false); - //If RanToCompletion a response will be returned - //If Faulted or Canceled an appropriate exception will be thrown + // If RanToCompletion a response will be returned + // If Faulted or Canceled an appropriate exception will be thrown await completedTask .ConfigureAwait(continueOnCapturedContext: false); diff --git a/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs b/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs index f77562ac617f..ed047675bcb3 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs +++ b/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs @@ -130,7 +130,6 @@ public void Constructor_WithValidParameters_CreatesCoordinator() // Assert Assert.IsNotNull(coordinator); - Assert.IsNull(coordinator.DownloadException); } [DataTestMethod] @@ -192,26 +191,6 @@ public void Constructor_WithEncryptionClient_ExceptionMessageIsDescriptive() #endregion - #region Property Tests - - [TestMethod] - public void DownloadException_InitiallyNull() - { - // Arrange - var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); - var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); - var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); - var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); - - // Act - var exception = coordinator.DownloadException; - - // Assert - Assert.IsNull(exception); - } - - #endregion - #region Discovery - PART Strategy - Single Part Tests [TestMethod] @@ -1156,8 +1135,6 @@ public async Task StartDownloadsAsync_BackgroundTaskSuccess_DisposesCancellation !coordinator.DownloadCompletionTask.IsCanceled, "Background task should complete successfully"); - Assert.IsNull(coordinator.DownloadException, - "No download exception should occur"); } [TestMethod] @@ -1217,10 +1194,8 @@ public async Task StartDownloadsAsync_BackgroundTaskFailure_DisposesCancellation // Assert - Background task should have failed but cleanup should be done Assert.IsTrue(coordinator.DownloadCompletionTask.IsCompleted, "Background task should be completed (even with failure)"); - Assert.IsNotNull(coordinator.DownloadException, - "Download exception should be captured"); - Assert.IsInstanceOfType(coordinator.DownloadException, typeof(InvalidOperationException), - "Should capture the simulated failure"); + Assert.IsTrue(coordinator.DownloadCompletionTask.IsFaulted, + "Background task should be faulted"); } [TestMethod] @@ -1270,8 +1245,7 @@ public async Task StartDownloadsAsync_EarlyError_DisposesCancellationTokenSource Assert.AreEqual("Simulated prepare failure", ex.Message); } - // Assert - Exception should be captured and no background task should exist - Assert.IsNotNull(coordinator.DownloadException, "Download exception should be captured"); + // Assert - DownloadCompletionTask should return completed task when no background work exists Assert.IsTrue(coordinator.DownloadCompletionTask.IsCompleted, "DownloadCompletionTask should return completed task when no background work exists"); } 
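The fail-fast contract of TaskHelpers.WhenAllFailFastAsync, added earlier in this patch, is easiest to see in isolation. The following standalone sketch (invented names such as FailFastSketch; not SDK code) shows why a WhenAny loop surfaces the first fault immediately, where Task.WhenAll would sit on the failure until every task finished:

    using System;
    using System.Collections.Generic;
    using System.Threading.Tasks;

    internal static class FailFastSketch
    {
        internal static async Task Main()
        {
            // Two simulated part downloads: one slow success, one immediate failure.
            var tasks = new List<Task>
            {
                Task.Delay(5000),
                Task.FromException(new InvalidOperationException("part failed")),
            };

            var remaining = new HashSet<Task>(tasks);
            try
            {
                while (remaining.Count > 0)
                {
                    var completed = await Task.WhenAny(remaining);
                    await completed; // rethrows the original exception if faulted
                    remaining.Remove(completed);
                }
            }
            catch (InvalidOperationException ex)
            {
                // Reached within milliseconds, not after the 5-second delay;
                // the still-running tasks are the caller's to cancel or drain.
                Console.WriteLine("First failure: " + ex.Message);
            }
        }
    }

Note the loop deliberately awaits the completed task before making any cancellation check, which is what keeps the original exception, rather than an OperationCanceledException, as the one that propagates.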
@@ -1341,8 +1315,8 @@ public async Task StartDownloadsAsync_BackgroundTaskCancellation_HandlesTokenDis // Assert - Cancellation should be handled properly with cleanup Assert.IsTrue(coordinator.DownloadCompletionTask.IsCompleted, "Background task should be completed"); - Assert.IsNotNull(coordinator.DownloadException, - "Cancellation exception should be captured"); + Assert.IsTrue(coordinator.DownloadCompletionTask.IsFaulted || coordinator.DownloadCompletionTask.IsCanceled, + "Background task should be faulted or canceled"); } #endregion @@ -1424,36 +1398,6 @@ public async Task StartDownloadAsync_SinglePart_WithPreCancelledToken_ThrowsOper } - [TestMethod] - public async Task DiscoverDownloadStrategyAsync_WhenCancelled_SetsDownloadException() - { - // Arrange - var mockClient = new Mock(); - var cancelledException = new OperationCanceledException(); - mockClient.Setup(x => x.GetObjectAsync(It.IsAny(), It.IsAny())) - .ThrowsAsync(cancelledException); - - var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); - var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); - var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); - - var cts = new CancellationTokenSource(); - cts.Cancel(); - - // Act - try - { - await coordinator.StartDownloadAsync(null, cts.Token); - } - catch (OperationCanceledException) - { - // Expected - } - - // Assert - Assert.IsNotNull(coordinator.DownloadException); - Assert.IsInstanceOfType(coordinator.DownloadException, typeof(OperationCanceledException)); - } [TestMethod] public async Task DiscoverDownloadStrategyAsync_PassesCancellationTokenToS3Client() @@ -1549,13 +1493,14 @@ public async Task StartDownloadsAsync_WhenCancelledDuringDownloads_NotifiesBuffe // Expected } - // Assert - Assert.IsNotNull(coordinator.DownloadException); - Assert.IsInstanceOfType(coordinator.DownloadException, typeof(OperationCanceledException)); + // Assert - Verify DownloadCompletionTask is faulted with the cancellation exception + Assert.IsTrue(coordinator.DownloadCompletionTask.IsCompleted, "DownloadCompletionTask should be completed"); + Assert.IsTrue(coordinator.DownloadCompletionTask.IsFaulted || coordinator.DownloadCompletionTask.IsCanceled, + "DownloadCompletionTask should be faulted or canceled"); } [TestMethod] - public async Task StartDownloadsAsync_WhenCancelled_SetsDownloadException() + public async Task StartDownloadsAsync_WhenCancelled_CompletionTaskIsFaulted() { // Arrange var totalParts = 3; @@ -1594,9 +1539,10 @@ public async Task StartDownloadsAsync_WhenCancelled_SetsDownloadException() // Expected } - // Assert - Assert.IsNotNull(coordinator.DownloadException); - Assert.IsInstanceOfType(coordinator.DownloadException, typeof(OperationCanceledException)); + // Assert - Verify DownloadCompletionTask is faulted with the cancellation + Assert.IsTrue(coordinator.DownloadCompletionTask.IsCompleted, "DownloadCompletionTask should be completed"); + Assert.IsTrue(coordinator.DownloadCompletionTask.IsFaulted || coordinator.DownloadCompletionTask.IsCanceled, + "DownloadCompletionTask should be faulted or canceled"); } [TestMethod] @@ -1680,8 +1626,9 @@ public async Task StartDownloadsAsync_CancellationPropagatesAcrossConcurrentDown // Expected } - // Assert - Error should be captured - Assert.IsNotNull(coordinator.DownloadException); + // Assert - DownloadCompletionTask should be faulted + Assert.IsTrue(coordinator.DownloadCompletionTask.IsFaulted || 
coordinator.DownloadCompletionTask.IsCanceled,
+                "DownloadCompletionTask should be faulted or canceled when errors occur");
         }

         [TestMethod]
@@ -3418,10 +3365,8 @@ public async Task StartDownloadsAsync_BackgroundPartFails_CancelsInternalToken()
             Assert.IsTrue(part3SawCancellation,
                 "Part 3 should have received cancellation via internalCts.Token (deterministic with TaskCompletionSource)");
-            Assert.IsNotNull(coordinator.DownloadException,
-                "Download exception should be captured when background part fails");
-            Assert.IsInstanceOfType(coordinator.DownloadException, typeof(InvalidOperationException),
-                "Download exception should be the Part 2 failure");
+            Assert.IsTrue(coordinator.DownloadCompletionTask.IsFaulted,
+                "DownloadCompletionTask should be faulted when background part fails");
         }

         [TestMethod]
@@ -3484,7 +3429,7 @@ public async Task StartDownloadsAsync_MultiplePartsFail_HandlesGracefully()
             // Assert - Should handle multiple failures gracefully
             Assert.IsTrue(failedParts.Count > 0, "At least one part should have failed");
-            Assert.IsNotNull(coordinator.DownloadException, "Download exception should be captured");
+            Assert.IsTrue(coordinator.DownloadCompletionTask.IsFaulted, "DownloadCompletionTask should be faulted");
         }

         [TestMethod]
@@ -3559,10 +3504,674 @@ public async Task StartDownloadsAsync_CancellationRacesWithDispose_HandlesGracef
             // by checking IsCancellationRequested before calling Cancel()
             Assert.IsFalse(objectDisposedExceptionCaught,
                 "ObjectDisposedException should not propagate due to IsCancellationRequested check");
-            Assert.IsNotNull(coordinator.DownloadException,
-                "Download exception should be the original failure, not ObjectDisposedException");
-            Assert.IsInstanceOfType(coordinator.DownloadException, typeof(InvalidOperationException),
-                "Download exception should be the original InvalidOperationException from Part 2 failure");
+            Assert.IsTrue(coordinator.DownloadCompletionTask.IsFaulted,
+                "DownloadCompletionTask should be faulted with the original failure");
+
+            // Verify the exception type via the Task's exception
+            var aggregateException = coordinator.DownloadCompletionTask.Exception;
+            Assert.IsNotNull(aggregateException, "Task should have an exception");
+            Assert.IsInstanceOfType(aggregateException.InnerException, typeof(InvalidOperationException),
+                "Inner exception should be the original InvalidOperationException from Part 2 failure");
+        }
+
+        [TestMethod]
+        public async Task StartDownloadsAsync_PartFailsDuringDownload_OriginalExceptionPropagatesFromCompletionTask()
+        {
+            // Arrange - Test that when a part fails with InvalidOperationException,
+            // the DownloadCompletionTask throws InvalidOperationException (not OperationCanceledException)
+            // This validates the WhenAllFailFastAsync fix
+            var totalParts = 5;
+            var partSize = 8 * 1024 * 1024;
+            var totalObjectSize = totalParts * partSize;
+
+            var mockDataHandler = new Mock();
+
+            mockDataHandler
+                .Setup(x => x.WaitForCapacityAsync(It.IsAny()))
+                .Returns(Task.CompletedTask);
+
+            mockDataHandler
+                .Setup(x => x.PrepareAsync(It.IsAny(), It.IsAny()))
+                .Returns(Task.CompletedTask);
+
+            // Part 1, 2 succeed; Part 3 fails with InvalidOperationException
+            mockDataHandler
+                .Setup(x => x.ProcessPartAsync(It.IsAny(), It.IsAny(), It.IsAny()))
+                .Returns((partNum, response, ct) =>
+                {
+                    if (partNum <= 2)
+                    {
+                        return Task.CompletedTask; // Parts 1-2 succeed
+                    }
+                    if (partNum == 3)
+                    {
+                        throw new InvalidOperationException("Simulated Part 3 failure");
+                    }
+                    // Parts 4-5 may or may not run depending on cancellation timing
+                    ct.ThrowIfCancellationRequested();
+                    return Task.CompletedTask;
+                });
+
+            mockDataHandler.Setup(x => x.ReleaseCapacity());
+            mockDataHandler.Setup(x => x.OnDownloadComplete(It.IsAny()));
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart(
+                totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true);
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.PART);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 2);
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object);
+
+            // Act
+            await coordinator.StartDownloadAsync(null, CancellationToken.None);
+
+            Exception caughtException = null;
+            try
+            {
+                await coordinator.DownloadCompletionTask;
+            }
+            catch (Exception ex)
+            {
+                caughtException = ex;
+            }
+
+            // Assert - The key validation: exception should be InvalidOperationException, NOT OperationCanceledException
+            // Before fix: WhenAllOrFirstExceptionAsync checked cancellation before processing faulted tasks,
+            // so OperationCanceledException would be thrown instead of the original exception
+            // After fix: WhenAllFailFastAsync checks for completed tasks first,
+            // ensuring the original InvalidOperationException propagates
+            Assert.IsNotNull(caughtException, "DownloadCompletionTask should throw an exception");
+            Assert.IsInstanceOfType(caughtException, typeof(InvalidOperationException),
+                "DownloadCompletionTask should throw InvalidOperationException (the original failure), " +
+                "NOT OperationCanceledException. If this fails, WhenAllFailFastAsync " +
+                "is not properly prioritizing faulted tasks over cancellation checks.");
+            Assert.AreEqual("Simulated Part 3 failure", caughtException.Message,
+                "The original exception message should be preserved");
+
+            // Also verify DownloadCompletionTask is faulted
+            Assert.IsTrue(coordinator.DownloadCompletionTask.IsFaulted, "DownloadCompletionTask should be faulted");
+        }
+
+        #endregion
+
+        #region Semaphore and Capacity Release Tests
+
+        [TestMethod]
+        public async Task CreateDownloadTasksAsync_CancellationAfterCapacityBeforeHttpSlot_ReleasesCapacityExactlyOnce()
+        {
+            // Arrange - Test that when cancellation happens after acquiring capacity but before HTTP slot,
+            // capacity is released exactly once (not double-released)
+            var totalParts = 3;
+            var partSize = 8 * 1024 * 1024;
+            var totalObjectSize = totalParts * partSize;
+
+            var capacityReleaseCount = 0;
+            var capacityAcquireCount = 0;
+            var httpSlotAcquireCount = 0;
+
+            // Use a blocking HTTP throttler that we control
+            var httpThrottler = new SemaphoreSlim(1, 1);
+
+            // Control when Part 2 can acquire HTTP slot
+            var part2CanAcquireHttpSlot = new TaskCompletionSource<bool>();
+            var part2AcquiredCapacity = new TaskCompletionSource<bool>();
+
+            var mockDataHandler = new Mock();
+
+            // Track capacity acquisition
+            mockDataHandler
+                .Setup(x => x.WaitForCapacityAsync(It.IsAny()))
+                .Returns(ct =>
+                {
+                    var count = Interlocked.Increment(ref capacityAcquireCount);
+                    if (count == 2) // Part 2's capacity acquisition
+                    {
+                        part2AcquiredCapacity.SetResult(true);
+                        // Wait a bit to let the cancellation happen
+                        return Task.Delay(50);
+                    }
+                    return Task.CompletedTask;
+                });
+
+            // Track capacity release
+            mockDataHandler
+                .Setup(x => x.ReleaseCapacity())
+                .Callback(() =>
+                {
+                    Interlocked.Increment(ref capacityReleaseCount);
+                });
+
+            // Part 1 
+
+        [TestMethod]
+        public async Task CreateDownloadTasksAsync_CancellationAfterBothAcquired_ReleasesBothExactlyOnce()
+        {
+            // Arrange - Test that when cancellation happens after acquiring both capacity and the HTTP slot,
+            // both are released exactly once
+            var totalParts = 3;
+            var partSize = 8 * 1024 * 1024;
+            var totalObjectSize = totalParts * partSize;
+
+            var capacityReleaseCount = 0;
+            var capacityAcquireCount = 0;
+
+            var httpThrottler = new SemaphoreSlim(2, 2);
+            var initialHttpCount = httpThrottler.CurrentCount;
+
+            var mockDataHandler = new Mock();
+
+            // Track capacity acquisition
+            mockDataHandler
+                .Setup(x => x.WaitForCapacityAsync(It.IsAny<CancellationToken>()))
+                .Returns(() =>
+                {
+                    Interlocked.Increment(ref capacityAcquireCount);
+                    return Task.CompletedTask;
+                });
+
+            // Track capacity release
+            mockDataHandler
+                .Setup(x => x.ReleaseCapacity())
+                .Callback(() =>
+                {
+                    Interlocked.Increment(ref capacityReleaseCount);
+                });
+
+            // Part 1 processing succeeds
+            mockDataHandler
+                .Setup(x => x.ProcessPartAsync(1, It.IsAny<GetObjectResponse>(), It.IsAny<CancellationToken>()))
+                .Returns(Task.CompletedTask);
+
+            // Part 2 processing fails after both capacity and the HTTP slot are acquired
+            mockDataHandler
+                .Setup(x => x.ProcessPartAsync(2, It.IsAny<GetObjectResponse>(), It.IsAny<CancellationToken>()))
+                .ThrowsAsync(new InvalidOperationException("Simulated Part 2 processing failure"));
+
+            mockDataHandler.Setup(x => x.OnDownloadComplete(It.IsAny()));
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart(
+                totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true);
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.PART);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 2);
+            var coordinator = new MultipartDownloadManager(
+                mockClient.Object, request, config, mockDataHandler.Object, null, httpThrottler);
+
+            // Act
+            await coordinator.StartDownloadAsync(null, CancellationToken.None);
+
+            try
+            {
+                await coordinator.DownloadCompletionTask;
+            }
+            catch (InvalidOperationException)
+            {
+                // Expected failure from Part 2
+            }
+
+            // Assert - HTTP semaphore should be back to its initial count (all slots released)
+            Assert.AreEqual(initialHttpCount, httpThrottler.CurrentCount,
+                $"HTTP semaphore should be fully released. Initial={initialHttpCount}, Current={httpThrottler.CurrentCount}");
+
+            // Capacity releases should match acquisitions minus Part 1 (which doesn't use ReleaseCapacity)
+            // Part 2 will release capacity in the error handler
+            Assert.IsTrue(capacityReleaseCount >= 1,
+                $"At least Part 2's capacity should be released. Released={capacityReleaseCount}");
+
+            // Cleanup
+            httpThrottler.Dispose();
+        }
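Acquiring capacity before the HTTP slot, and releasing only what was actually acquired, is the ordering this test locks in. A self-contained sketch of that pattern (all names illustrative; downloadPart stands in for the real per-part work):

    using System;
    using System.Threading;
    using System.Threading.Tasks;

    internal static class OrderedAcquisitionSketch
    {
        public static async Task RunPartAsync(
            SemaphoreSlim capacity,
            SemaphoreSlim httpThrottler,
            Func<CancellationToken, Task> downloadPart,
            CancellationToken ct)
        {
            bool capacityHeld = false, httpSlotHeld = false;
            try
            {
                await capacity.WaitAsync(ct).ConfigureAwait(false);
                capacityHeld = true;

                await httpThrottler.WaitAsync(ct).ConfigureAwait(false);
                httpSlotHeld = true;

                await downloadPart(ct).ConfigureAwait(false);
            }
            finally
            {
                // Release in reverse order, and only what this call acquired.
                if (httpSlotHeld) httpThrottler.Release();
                if (capacityHeld) capacity.Release();
            }
        }
    }

Note one deliberate simplification: this sketch releases capacity unconditionally, whereas the SDK code under test transfers capacity ownership to the handler on success (see the success-path test further down).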
+
+        [TestMethod]
+        public async Task CreateDownloadTasksAsync_TaskCreationFails_ReleasesHttpSlotAndCapacity()
+        {
+            // Arrange - Test that if task creation fails, both the HTTP slot and capacity are released.
+            // This tests the catch block in CreateDownloadTasksAsync
+            var totalParts = 3;
+            var partSize = 8 * 1024 * 1024;
+            var totalObjectSize = totalParts * partSize;
+
+            var capacityReleaseCount = 0;
+
+            var httpThrottler = new SemaphoreSlim(2, 2);
+            var initialHttpCount = httpThrottler.CurrentCount;
+
+            var mockDataHandler = new Mock();
+
+            mockDataHandler
+                .Setup(x => x.WaitForCapacityAsync(It.IsAny<CancellationToken>()))
+                .Returns(Task.CompletedTask);
+
+            mockDataHandler
+                .Setup(x => x.ReleaseCapacity())
+                .Callback(() =>
+                {
+                    Interlocked.Increment(ref capacityReleaseCount);
+                });
+
+            // Part 1 processing succeeds
+            mockDataHandler
+                .Setup(x => x.ProcessPartAsync(1, It.IsAny<GetObjectResponse>(), It.IsAny<CancellationToken>()))
+                .Returns(Task.CompletedTask);
+
+            mockDataHandler.Setup(x => x.OnDownloadComplete(It.IsAny()));
+
+            // S3 client: Part 1 succeeds, Part 2 HTTP request fails
+            var callCount = 0;
+            var mockClient = new Mock<IAmazonS3>();
+            mockClient
+                .Setup(x => x.GetObjectAsync(It.IsAny<GetObjectRequest>(), It.IsAny<CancellationToken>()))
+                .Returns(() =>
+                {
+                    callCount++;
+                    if (callCount == 1)
+                    {
+                        return Task.FromResult(MultipartDownloadTestHelpers.CreateMultipartFirstPartResponse(
+                            partSize, totalParts, totalObjectSize, "test-etag"));
+                    }
+                    // Part 2 HTTP request fails
+                    throw new AmazonS3Exception("Simulated HTTP failure");
+                });
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.PART);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1);
+            var coordinator = new MultipartDownloadManager(
+                mockClient.Object, request, config, mockDataHandler.Object, null, httpThrottler);
+
+            // Act
+            await coordinator.StartDownloadAsync(null, CancellationToken.None);
+
+            try
+            {
+                await coordinator.DownloadCompletionTask;
+            }
+            catch (AmazonS3Exception)
+            {
+                // Expected
+            }
+
+            // Assert - HTTP semaphore should be fully released
+            Assert.AreEqual(initialHttpCount, httpThrottler.CurrentCount,
+                $"HTTP semaphore should be fully released after HTTP failure. Initial={initialHttpCount}, Current={httpThrottler.CurrentCount}");
+
+            // Capacity should be released for the failed part
+            Assert.IsTrue(capacityReleaseCount >= 1,
+                $"Capacity should be released for failed Part 2. Released={capacityReleaseCount}");
+
+            // Cleanup
+            httpThrottler.Dispose();
+        }
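The catch block this test exercises has a narrow job: if the per-part work cannot even be started, the caller still holds both resources and must hand them back before rethrowing. A sketch reusing the hypothetical CapacityLease from the earlier sketch:

    using System;
    using System.Threading;
    using System.Threading.Tasks;

    internal static class TaskStartSketch
    {
        public static Task StartPartTask(
            SemaphoreSlim httpThrottler,
            CapacityLease lease,
            Func<Task> downloadPart)
        {
            try
            {
                return Task.Run(downloadPart);
            }
            catch
            {
                // The task never started, so this caller still owns both
                // resources and is the only one who can return them.
                httpThrottler.Release();
                lease.Release();
                throw;
            }
        }
    }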
+
+        [TestMethod]
+        public async Task CreateDownloadTasksAsync_MultiplePartsFailConcurrently_NoDoubleRelease()
+        {
+            // Arrange - Test that when multiple parts fail concurrently, no double releases occur
+            var totalParts = 5;
+            var partSize = 8 * 1024 * 1024;
+            var totalObjectSize = totalParts * partSize;
+
+            var capacityReleaseCount = 0;
+            var capacityAcquireCount = 0;
+
+            var httpThrottler = new SemaphoreSlim(3, 3); // Allow 3 concurrent requests
+            var initialHttpCount = httpThrottler.CurrentCount;
+
+            var mockDataHandler = new Mock();
+
+            mockDataHandler
+                .Setup(x => x.WaitForCapacityAsync(It.IsAny<CancellationToken>()))
+                .Returns(() =>
+                {
+                    Interlocked.Increment(ref capacityAcquireCount);
+                    return Task.CompletedTask;
+                });
+
+            mockDataHandler
+                .Setup(x => x.ReleaseCapacity())
+                .Callback(() =>
+                {
+                    Interlocked.Increment(ref capacityReleaseCount);
+                });
+
+            // Part 1 succeeds
+            mockDataHandler
+                .Setup(x => x.ProcessPartAsync(1, It.IsAny<GetObjectResponse>(), It.IsAny<CancellationToken>()))
+                .Returns(Task.CompletedTask);
+
+            // Parts 2-5 all fail concurrently
+            mockDataHandler
+                .Setup(x => x.ProcessPartAsync(It.IsInRange(2, 5, Moq.Range.Inclusive), It.IsAny<GetObjectResponse>(), It.IsAny<CancellationToken>()))
+                .ThrowsAsync(new InvalidOperationException("Simulated concurrent failure"));
+
+            mockDataHandler.Setup(x => x.OnDownloadComplete(It.IsAny()));
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart(
+                totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true);
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.PART);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 3);
+            var coordinator = new MultipartDownloadManager(
+                mockClient.Object, request, config, mockDataHandler.Object, null, httpThrottler);
+
+            // Act
+            await coordinator.StartDownloadAsync(null, CancellationToken.None);
+
+            try
+            {
+                await coordinator.DownloadCompletionTask;
+            }
+            catch (InvalidOperationException)
+            {
+                // Expected - the first failure propagates
+            }
+
+            // Assert - No double releases should occur
+            // HTTP semaphore should be back to its initial count
+            Assert.AreEqual(initialHttpCount, httpThrottler.CurrentCount,
+                $"HTTP semaphore should be fully released. Initial={initialHttpCount}, Current={httpThrottler.CurrentCount}");
+
+            // Capacity releases should not exceed acquisitions minus Part 1
+            Assert.IsTrue(capacityReleaseCount <= capacityAcquireCount - 1,
+                $"Capacity should not be double-released. Acquired={capacityAcquireCount}, Released={capacityReleaseCount}");
+
+            // Cleanup
+            httpThrottler.Dispose();
+        }
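When several parts fault together, only one exception can propagate to the awaiter, but the remaining faults must still be observed so they never resurface later as an UnobservedTaskException. One hedged way to express that, independent of the SDK's actual implementation:

    using System.Collections.Generic;
    using System.Threading.Tasks;

    internal static class FaultObservationSketch
    {
        public static async Task AwaitAllObservingFaultsAsync(IReadOnlyList<Task> tasks)
        {
            try
            {
                await Task.WhenAll(tasks).ConfigureAwait(false);
            }
            catch
            {
                // Reading Task.Exception marks each fault as observed.
                foreach (var t in tasks)
                    _ = t.Exception;
                throw; // propagates the first failure, as the test expects
            }
        }
    }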
+
+        [TestMethod]
+        public async Task CreateDownloadTasksAsync_CancellationDuringCapacityWait_DoesNotReleaseUnacquiredResources()
+        {
+            // Arrange - Test that when cancellation happens DURING the capacity wait,
+            // no resources are released (since they were never acquired)
+            var totalParts = 3;
+            var partSize = 8 * 1024 * 1024;
+            var totalObjectSize = totalParts * partSize;
+
+            var capacityReleaseCount = 0;
+
+            var httpThrottler = new SemaphoreSlim(2, 2);
+            var initialHttpCount = httpThrottler.CurrentCount;
+
+            var cts = new CancellationTokenSource();
+            var mockDataHandler = new Mock();
+
+            var callCount = 0;
+            mockDataHandler
+                .Setup(x => x.WaitForCapacityAsync(It.IsAny<CancellationToken>()))
+                .Returns<CancellationToken>(ct =>
+                {
+                    callCount++;
+                    if (callCount == 1)
+                    {
+                        // Part 1 discovery succeeds
+                        return Task.CompletedTask;
+                    }
+                    // Part 2 capacity wait is cancelled
+                    cts.Cancel();
+                    throw new OperationCanceledException();
+                });
+
+            mockDataHandler
+                .Setup(x => x.ReleaseCapacity())
+                .Callback(() =>
+                {
+                    Interlocked.Increment(ref capacityReleaseCount);
+                });
+
+            // Part 1 processing succeeds
+            mockDataHandler
+                .Setup(x => x.ProcessPartAsync(1, It.IsAny<GetObjectResponse>(), It.IsAny<CancellationToken>()))
+                .Returns(Task.CompletedTask);
+
+            mockDataHandler.Setup(x => x.OnDownloadComplete(It.IsAny()));
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart(
+                totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true);
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.PART);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1);
+            var coordinator = new MultipartDownloadManager(
+                mockClient.Object, request, config, mockDataHandler.Object, null, httpThrottler);
+
+            // Act
+            await coordinator.StartDownloadAsync(null, CancellationToken.None);
+
+            try
+            {
+                await coordinator.DownloadCompletionTask;
+            }
+            catch (OperationCanceledException)
+            {
+                // Expected
+            }
+
+            // Assert - No resources should be released for Part 2 since capacity was never acquired
+            Assert.AreEqual(0, capacityReleaseCount,
+                $"No capacity should be released when cancelled during WaitForCapacityAsync. Released={capacityReleaseCount}");
+
+            // HTTP semaphore should still be at its initial count (Part 1's slot was released normally)
+            Assert.AreEqual(initialHttpCount, httpThrottler.CurrentCount,
+                $"HTTP semaphore should be at initial count. Initial={initialHttpCount}, Current={httpThrottler.CurrentCount}");
+
+            // Cleanup
+            httpThrottler.Dispose();
+        }
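The no-release rule for a cancelled wait falls directly out of SemaphoreSlim semantics: a WaitAsync that throws OperationCanceledException never took a permit. A small standalone demonstration:

    using System;
    using System.Threading;
    using System.Threading.Tasks;

    internal static class CancelledWaitDemo
    {
        public static async Task Main()
        {
            var sem = new SemaphoreSlim(0, 1);         // no permit available
            var cts = new CancellationTokenSource(50); // cancel after 50 ms
            try
            {
                await sem.WaitAsync(cts.Token);        // cancelled before acquiring
            }
            catch (OperationCanceledException)
            {
                // Nothing was acquired, so there is nothing to release here.
            }
            Console.WriteLine(sem.CurrentCount);       // prints 0: count unchanged
            sem.Dispose();
            cts.Dispose();
        }
    }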
+
+        [TestMethod]
+        public async Task CreateDownloadTasksAsync_SuccessfulDownload_AllResourcesReleasedProperly()
+        {
+            // Arrange - Test that on a successful download, all resources are released properly
+            var totalParts = 4;
+            var partSize = 8 * 1024 * 1024;
+            var totalObjectSize = totalParts * partSize;
+
+            var capacityAcquireCount = 0;
+            var capacityReleaseCount = 0;
+
+            var httpThrottler = new SemaphoreSlim(2, 2);
+            var initialHttpCount = httpThrottler.CurrentCount;
+
+            var mockDataHandler = new Mock();
+
+            mockDataHandler
+                .Setup(x => x.WaitForCapacityAsync(It.IsAny<CancellationToken>()))
+                .Returns(() =>
+                {
+                    Interlocked.Increment(ref capacityAcquireCount);
+                    return Task.CompletedTask;
+                });
+
+            mockDataHandler
+                .Setup(x => x.ReleaseCapacity())
+                .Callback(() =>
+                {
+                    Interlocked.Increment(ref capacityReleaseCount);
+                });
+
+            // All parts succeed
+            mockDataHandler
+                .Setup(x => x.ProcessPartAsync(It.IsAny<int>(), It.IsAny<GetObjectResponse>(), It.IsAny<CancellationToken>()))
+                .Returns(Task.CompletedTask);
+
+            mockDataHandler.Setup(x => x.OnDownloadComplete(It.IsAny()));
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart(
+                totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true);
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.PART);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 2);
+            var coordinator = new MultipartDownloadManager(
+                mockClient.Object, request, config, mockDataHandler.Object, null, httpThrottler);
+
+            // Act
+            await coordinator.StartDownloadAsync(null, CancellationToken.None);
+            await coordinator.DownloadCompletionTask;
+
+            // Assert - All resources should be released properly
+            // HTTP semaphore should be back to its initial count
+            Assert.AreEqual(initialHttpCount, httpThrottler.CurrentCount,
+                $"HTTP semaphore should be fully released after successful download. Initial={initialHttpCount}, Current={httpThrottler.CurrentCount}");
+
+            // Capacity is acquired for all parts but released differently:
+            // - Part 1: capacity is managed by the stream (not via ReleaseCapacity)
+            // - Parts 2-4: should NOT call ReleaseCapacity on success (the handler manages it)
+            // Note: ReleaseCapacity is only called on ERROR paths in CreateDownloadTaskAsync
+            Assert.AreEqual(0, capacityReleaseCount,
+                $"ReleaseCapacity should not be called on the success path (the handler manages capacity). Released={capacityReleaseCount}");
+
+            // Verify all parts acquired capacity
+            Assert.AreEqual(totalParts, capacityAcquireCount,
+                $"All parts should have acquired capacity. Acquired={capacityAcquireCount}");
+
+            // Cleanup
+            httpThrottler.Dispose();
+        }
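The success-path assertion (zero ReleaseCapacity calls) implies an ownership transfer: once a part is handed to the handler, the consumer of the buffered data, not the download loop, frees its capacity. One way to express that convention, again reusing the hypothetical CapacityLease (the SDK's actual types differ):

    using System;

    internal sealed class BufferedPart : IDisposable
    {
        private readonly CapacityLease _lease;

        public BufferedPart(CapacityLease lease)
        {
            _lease = lease;
        }

        // Whoever drains the part disposes it, returning capacity at that
        // point rather than when the download loop finishes the part.
        public void Dispose()
        {
            _lease.Release(); // idempotent, so a late error path is still safe
        }
    }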
+
+        [TestMethod]
+        public async Task CreateDownloadTasksAsync_CancellationImmediatelyAfterHttpSlot_ReleasesResourcesCorrectly()
+        {
+            // Arrange - Test the specific code path where cancellation is detected immediately
+            // after acquiring the HTTP slot (the second cancellation check in CreateDownloadTasksAsync)
+            var totalParts = 3;
+            var partSize = 8 * 1024 * 1024;
+            var totalObjectSize = totalParts * partSize;
+
+            var capacityReleaseCount = 0;
+
+            var httpThrottler = new SemaphoreSlim(1, 1);
+            var initialHttpCount = httpThrottler.CurrentCount;
+
+            var internalCts = new CancellationTokenSource();
+            var mockDataHandler = new Mock();
+
+            var capacityCallCount = 0;
+            mockDataHandler
+                .Setup(x => x.WaitForCapacityAsync(It.IsAny<CancellationToken>()))
+                .Returns(() =>
+                {
+                    capacityCallCount++;
+                    return Task.CompletedTask;
+                });
+
+            mockDataHandler
+                .Setup(x => x.ReleaseCapacity())
+                .Callback(() =>
+                {
+                    Interlocked.Increment(ref capacityReleaseCount);
+                });
+
+            // Part 1 succeeds
+            mockDataHandler
+                .Setup(x => x.ProcessPartAsync(1, It.IsAny<GetObjectResponse>(), It.IsAny<CancellationToken>()))
+                .Returns(Task.CompletedTask);
+
+            // Part 2 processing will trigger cancellation
+            mockDataHandler
+                .Setup(x => x.ProcessPartAsync(2, It.IsAny<GetObjectResponse>(), It.IsAny<CancellationToken>()))
+                .ThrowsAsync(new InvalidOperationException("Part 2 failure triggers cancellation"));
+
+            mockDataHandler.Setup(x => x.OnDownloadComplete(It.IsAny()));
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart(
+                totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true);
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.PART);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1);
+            var coordinator = new MultipartDownloadManager(
+                mockClient.Object, request, config, mockDataHandler.Object, null, httpThrottler);
+
+            // Act
+            await coordinator.StartDownloadAsync(null, CancellationToken.None);
+
+            try
+            {
+                await coordinator.DownloadCompletionTask;
+            }
+            catch (InvalidOperationException)
+            {
+                // Expected
+            }
+
+            // Assert - Resources should be released correctly
+            Assert.AreEqual(initialHttpCount, httpThrottler.CurrentCount,
+                $"HTTP semaphore should be fully released. Initial={initialHttpCount}, Current={httpThrottler.CurrentCount}");
+
+            // Part 2 should have its capacity released due to the error
+            Assert.IsTrue(capacityReleaseCount >= 1,
+                $"At least Part 2's capacity should be released on error. Released={capacityReleaseCount}");
+
+            // Cleanup
+            httpThrottler.Dispose();
+        }
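The "second cancellation check" this test targets re-validates the token immediately after winning the HTTP slot: if cancellation raced in during the wait, the slot and the capacity lease are returned before the cancellation is observed. A hedged sketch with illustrative names:

    using System.Threading;

    internal static class PostAcquireCheckSketch
    {
        public static void EnterHttpSlot(
            SemaphoreSlim httpThrottler,
            CapacityLease lease, // hypothetical lease from the earlier sketch
            CancellationToken ct)
        {
            httpThrottler.Wait(ct);
            if (ct.IsCancellationRequested)
            {
                // Undo the acquisition before surfacing cancellation so no
                // slot leaks on this race.
                httpThrottler.Release();
                lease.Release();
                ct.ThrowIfCancellationRequested();
            }
        }
    }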
+
+        #endregion