diff --git a/.gitignore b/.gitignore index 5042327fe021..7f81167eff89 100644 --- a/.gitignore +++ b/.gitignore @@ -69,4 +69,6 @@ sdk/test/Performance/**/BenchmarkDotNet.Artifacts/* #protocol-tests sdk/test/ProtocolTests/Generated/**/model sdk/test/ProtocolTests/Generated/**/sources -sdk/test/ProtocolTests/Generated/**/build-info \ No newline at end of file +sdk/test/ProtocolTests/Generated/**/build-info + +.DS_Store \ No newline at end of file diff --git a/generator/.DevConfigs/19ed68ce-9f46-4e1e-a0ff-45a2b3641946.json b/generator/.DevConfigs/19ed68ce-9f46-4e1e-a0ff-45a2b3641946.json new file mode 100644 index 000000000000..28cd581df6e4 --- /dev/null +++ b/generator/.DevConfigs/19ed68ce-9f46-4e1e-a0ff-45a2b3641946.json @@ -0,0 +1,13 @@ +{ + "services": [ + { + "serviceName": "S3", + "type": "patch", + "changeLogMessages": [ + "Added MaxInMemoryParts property to the Amazon.S3.Transfer.TransferUtilityOpenStreamRequest class for controlling memory usage during multipart downloads", + "Added PartSize property to the Amazon.S3.Transfer.BaseDownloadRequest class for configuring multipart download part sizes", + "Added MultipartDownloadType enum and property to the Amazon.S3.Transfer.BaseDownloadRequest class for selecting download strategy" + ] + } + ] +} diff --git a/generator/.DevConfigs/433a9a6d-b8ea-4676-b763-70711e8288e2.json b/generator/.DevConfigs/433a9a6d-b8ea-4676-b763-70711e8288e2.json new file mode 100644 index 000000000000..1c6a07f688f3 --- /dev/null +++ b/generator/.DevConfigs/433a9a6d-b8ea-4676-b763-70711e8288e2.json @@ -0,0 +1,11 @@ +{ + "services": [ + { + "serviceName": "S3", + "type": "minor", + "changeLogMessages": [ + "Added UploadInitiatedEvent, UploadCompletedEvent, and UploadFailedEvent events to the Amazon.S3.Transfer.TransferUtility.Upload operation for non-multipart uploads." + ] + } + ] +} diff --git a/generator/.DevConfigs/433a9a6d-b8ea-4676-b763-70711e8288e3.json b/generator/.DevConfigs/433a9a6d-b8ea-4676-b763-70711e8288e3.json new file mode 100644 index 000000000000..e5c52a3bcb6e --- /dev/null +++ b/generator/.DevConfigs/433a9a6d-b8ea-4676-b763-70711e8288e3.json @@ -0,0 +1,11 @@ +{ + "services": [ + { + "serviceName": "S3", + "type": "minor", + "changeLogMessages": [ + "Added UploadInitiatedEvent, UploadCompletedEvent, and UploadFailedEvent events to the Amazon.S3.Transfer.TransferUtility.Upload operation for multipart uploads." + ] + } + ] +} diff --git a/generator/.DevConfigs/55fe9e14-c79e-4426-9828-deae0451d4f6.json b/generator/.DevConfigs/55fe9e14-c79e-4426-9828-deae0451d4f6.json new file mode 100644 index 000000000000..4fb704cc474c --- /dev/null +++ b/generator/.DevConfigs/55fe9e14-c79e-4426-9828-deae0451d4f6.json @@ -0,0 +1,11 @@ +{ + "services": [ + { + "serviceName": "S3", + "type": "minor", + "changeLogMessages": [ + "Added DownloadDirectoryWithResponse methods to the Amazon.S3.Transfer.ITransferUtility interface. The new operations support downloading directories using multipart download for files and return response metadata." + ] + } + ] +} diff --git a/generator/.DevConfigs/77d980ad-8f58-4f2e-97f8-d2c8c5ba3732.json b/generator/.DevConfigs/77d980ad-8f58-4f2e-97f8-d2c8c5ba3732.json new file mode 100644 index 000000000000..1ca9ea6953b9 --- /dev/null +++ b/generator/.DevConfigs/77d980ad-8f58-4f2e-97f8-d2c8c5ba3732.json @@ -0,0 +1,11 @@ +{ + "services": [ + { + "serviceName": "S3", + "type": "minor", + "changeLogMessages": [ + "Created new UploadWithResponse methods on the Amazon.S3.Transfer.TransferUtility class that return response metadata information." 
+ ] + } + ] +} diff --git a/generator/.DevConfigs/7f23582e-3225-487b-83e7-167cf17cb234.json b/generator/.DevConfigs/7f23582e-3225-487b-83e7-167cf17cb234.json new file mode 100644 index 000000000000..9da2db586cf2 --- /dev/null +++ b/generator/.DevConfigs/7f23582e-3225-487b-83e7-167cf17cb234.json @@ -0,0 +1,11 @@ +{ + "services": [ + { + "serviceName": "S3", + "type": "minor", + "changeLogMessages": [ + "Added DownloadDirectoryInitiatedEvent, DownloadDirectoryCompletedEvent, and DownloadDirectoryFailedEvent events to the Amazon.S3.Transfer.ITransferUtility.DownloadDirectory operation." + ] + } + ] +} diff --git a/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872042.json b/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872042.json new file mode 100644 index 000000000000..c8bf4811b2fc --- /dev/null +++ b/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872042.json @@ -0,0 +1,11 @@ +{ + "services": [ + { + "serviceName": "S3", + "type": "minor", + "changeLogMessages": [ + "Added OpenStreamWithResponse method to the Amazon.S3.Transfer.ITransferUtility interface. The new operation supports downloading parts of the S3 object in parallel in the background while reading from the stream, for improved performance." + ] + } + ] +} diff --git a/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872043.json b/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872043.json new file mode 100644 index 000000000000..150ad4a52c6a --- /dev/null +++ b/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872043.json @@ -0,0 +1,11 @@ +{ + "services": [ + { + "serviceName": "S3", + "type": "minor", + "changeLogMessages": [ + "Added DownloadWithResponse method to the Amazon.S3.Transfer.ITransferUtility interface. The new operation supports downloading parts of the S3 object to a file in parallel for improved performance." + ] + } + ] +} diff --git a/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872044.json b/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872044.json new file mode 100644 index 000000000000..118b7b6e48c1 --- /dev/null +++ b/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872044.json @@ -0,0 +1,11 @@ +{ + "services": [ + { + "serviceName": "S3", + "type": "minor", + "changeLogMessages": [ + "Added UploadDirectoryWithResponse method to the Amazon.S3.Transfer.ITransferUtility interface." + ] + } + ] +} diff --git a/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872123.json b/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872123.json new file mode 100644 index 000000000000..62482b15f9c8 --- /dev/null +++ b/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872123.json @@ -0,0 +1,11 @@ +{ + "services": [ + { + "serviceName": "S3", + "type": "minor", + "changeLogMessages": [ + "Added DownloadInitiatedEvent, DownloadCompletedEvent, and DownloadFailedEvent events to the Amazon.S3.Transfer.TransferUtility.Download operation." + ] + } + ] +} diff --git a/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872124.json b/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872124.json new file mode 100644 index 000000000000..540544edfe00 --- /dev/null +++ b/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872124.json @@ -0,0 +1,11 @@ +{ + "services": [ + { + "serviceName": "S3", + "type": "minor", + "changeLogMessages": [ + "Added UploadDirectoryInitiatedEvent, UploadDirectoryCompletedEvent, and UploadDirectoryFailedEvent events to the Amazon.S3.Transfer.ITransferUtility.UploadDirectory operation."
+ ] + } + ] +} diff --git a/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae65.json b/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae65.json new file mode 100644 index 000000000000..ccd3be4c1bf2 --- /dev/null +++ b/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae65.json @@ -0,0 +1,11 @@ +{ + "services": [ + { + "serviceName": "S3", + "type": "patch", + "changeLogMessages": [ + "Fixed recursive property definition in the Amazon.S3.Transfer.TransferUtility internal Logger implementation" + ] + } + ] +} diff --git a/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae72.json b/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae72.json new file mode 100644 index 000000000000..21832b0bc135 --- /dev/null +++ b/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae72.json @@ -0,0 +1,11 @@ +{ + "services": [ + { + "serviceName": "S3", + "type": "patch", + "changeLogMessages": [ + "Added ContentLanguage property to the header collection of the Amazon.S3.Model.GetObjectResponse class." + ] + } + ] +} diff --git a/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae76.json b/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae76.json new file mode 100644 index 000000000000..1b73fcde33c5 --- /dev/null +++ b/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae76.json @@ -0,0 +1,12 @@ +{ + "services": [ + { + "serviceName": "S3", + "type": "minor", + "changeLogMessages": [ + "Added FailurePolicy property to the Amazon.S3.Transfer.TransferUtilityDownloadDirectoryRequest class to allow configuration of failure handling behavior during directory downloads. The default behavior is set to abort on failure. Users can now choose to either abort the entire operation or continue downloading remaining files when a failure occurs.", + "Added ObjectDownloadFailedEvent event to the Amazon.S3.Transfer.TransferUtility.DownloadDirectory operation to notify users when an individual file download fails during a directory download operation. This event provides details about the failed download, including the original request, the specific file request and the exception encountered." + ] + } + ] +} diff --git a/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae77.json b/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae77.json new file mode 100644 index 000000000000..8539da002f0a --- /dev/null +++ b/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae77.json @@ -0,0 +1,12 @@ +{ + "services": [ + { + "serviceName": "S3", + "type": "minor", + "changeLogMessages": [ + "Added FailurePolicy property to the Amazon.S3.Transfer.TransferUtilityUploadDirectoryRequest class to allow configuration of failure handling behavior during directory uploads. The default behavior is set to abort on failure. Users can now choose to either abort the entire operation or continue uploading remaining files when a failure occurs.", + "Added ObjectUploadFailedEvent event to the Amazon.S3.Transfer.TransferUtility.UploadDirectory operation to notify users when an individual file upload fails during a directory upload operation. This event provides details about the failed upload, including the original request, the specific file request and the exception encountered." 
+ ] + } + ] +} diff --git a/generator/.DevConfigs/f8a7b6c5-d4e3-2f1a-0b9c-8d7e6f5a4b3c.json b/generator/.DevConfigs/f8a7b6c5-d4e3-2f1a-0b9c-8d7e6f5a4b3c.json new file mode 100644 index 000000000000..ba09978119be --- /dev/null +++ b/generator/.DevConfigs/f8a7b6c5-d4e3-2f1a-0b9c-8d7e6f5a4b3c.json @@ -0,0 +1,11 @@ +{ + "services": [ + { + "serviceName": "S3", + "type": "patch", + "changeLogMessages": [ + "Added UploadWithResponse and UploadWithResponseAsync methods to the Amazon.S3.Transfer.ITransferUtility interface" + ] + } + ] +} diff --git a/generator/ServiceClientGeneratorLib/Generators/SourceFiles/AssemblyInfo.cs b/generator/ServiceClientGeneratorLib/Generators/SourceFiles/AssemblyInfo.cs index 156c2b897efe..09880b4803f6 100644 --- a/generator/ServiceClientGeneratorLib/Generators/SourceFiles/AssemblyInfo.cs +++ b/generator/ServiceClientGeneratorLib/Generators/SourceFiles/AssemblyInfo.cs @@ -1,7 +1,7 @@ // ------------------------------------------------------------------------------ // // This code was generated by a tool. -// Runtime Version: 17.0.0.0 +// Runtime Version: 18.0.0.0 // // Changes to this file may cause incorrect behavior and will be lost if // the code is regenerated. @@ -15,8 +15,8 @@ namespace ServiceClientGenerator.Generators.SourceFiles /// Class to produce the template output /// - #line 1 "C:\dev\repos\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" - [global::System.CodeDom.Compiler.GeneratedCodeAttribute("Microsoft.VisualStudio.TextTemplating", "17.0.0.0")] + #line 1 "D:\CodeBase\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" + [global::System.CodeDom.Compiler.GeneratedCodeAttribute("Microsoft.VisualStudio.TextTemplating", "18.0.0.0")] public partial class AssemblyInfo : BaseGenerator { #line hidden @@ -36,35 +36,35 @@ public override string TransformText() // associated with an assembly. 
[assembly: AssemblyTitle("""); - #line 12 "C:\dev\repos\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" + #line 12 "D:\CodeBase\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" this.Write(this.ToStringHelper.ToStringWithCulture(this.Config.AssemblyTitle)); #line default #line hidden this.Write("\")]\r\n#if BCL\r\n[assembly: AssemblyDescription(\""); - #line 14 "C:\dev\repos\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" + #line 14 "D:\CodeBase\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" this.Write(this.ToStringHelper.ToStringWithCulture(this.Config.AssemblyDescription(versionIdentifier: "4.7.2"))); #line default #line hidden this.Write("\")]\r\n#elif NETSTANDARD20\r\n[assembly: AssemblyDescription(\""); - #line 16 "C:\dev\repos\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" + #line 16 "D:\CodeBase\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" this.Write(this.ToStringHelper.ToStringWithCulture(this.Config.AssemblyDescription(versionIdentifier: "NetStandard 2.0"))); #line default #line hidden this.Write("\")]\r\n#elif NETCOREAPP3_1\r\n[assembly: AssemblyDescription(\""); - #line 18 "C:\dev\repos\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" + #line 18 "D:\CodeBase\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" this.Write(this.ToStringHelper.ToStringWithCulture(this.Config.AssemblyDescription(versionIdentifier: ".NET Core 3.1"))); #line default #line hidden this.Write("\")]\r\n#elif NET8_0\r\n[assembly: AssemblyDescription(\""); - #line 20 "C:\dev\repos\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" + #line 20 "D:\CodeBase\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" this.Write(this.ToStringHelper.ToStringWithCulture(this.Config.AssemblyDescription(versionIdentifier: ".NET 8.0"))); #line default @@ -72,7 +72,7 @@ public override string TransformText() this.Write("\")]\r\n#else\r\n#error Unknown platform constant - unable to set correct AssemblyDesc" + "ription\r\n#endif\r\n\r\n"); - #line 25 "C:\dev\repos\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" + #line 25 "D:\CodeBase\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" if (this.Config.AssemblyTitle=="AWSSDK.DynamoDBv2") { #line default @@ -81,22 +81,23 @@ public override string TransformText() [assembly: InternalsVisibleTo(""AWSSDK.UnitTests.NetFramework, PublicKey=0024000004800000940000000602000000240000525341310004000001000100db5f59f098d27276c7833875a6263a3cc74ab17ba9a9df0b52aedbe7252745db7274d5271fd79c1f08f668ecfa8eaab5626fa76adc811d3c8fc55859b0d09d3bc0a84eecd0ba891f2b8a2fc55141cdcc37c2053d53491e650a479967c3622762977900eddbf1252ed08a2413f00a28f3a0752a81203f03ccb7f684db373518b4"")] "); - #line 28 "C:\dev\repos\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" + #line 28 "D:\CodeBase\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" } #line default #line hidden - #line 29 "C:\dev\repos\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" + #line 29 
"D:\CodeBase\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" if (this.Config.AssemblyTitle=="AWSSDK.S3") { #line default #line hidden this.Write(@"[assembly: InternalsVisibleTo(""AWSSDK.UnitTests.S3.NetFramework, PublicKey=0024000004800000940000000602000000240000525341310004000001000100db5f59f098d27276c7833875a6263a3cc74ab17ba9a9df0b52aedbe7252745db7274d5271fd79c1f08f668ecfa8eaab5626fa76adc811d3c8fc55859b0d09d3bc0a84eecd0ba891f2b8a2fc55141cdcc37c2053d53491e650a479967c3622762977900eddbf1252ed08a2413f00a28f3a0752a81203f03ccb7f684db373518b4"")] [assembly: InternalsVisibleTo(""AWSSDK.UnitTests.NetFramework, PublicKey=0024000004800000940000000602000000240000525341310004000001000100db5f59f098d27276c7833875a6263a3cc74ab17ba9a9df0b52aedbe7252745db7274d5271fd79c1f08f668ecfa8eaab5626fa76adc811d3c8fc55859b0d09d3bc0a84eecd0ba891f2b8a2fc55141cdcc37c2053d53491e650a479967c3622762977900eddbf1252ed08a2413f00a28f3a0752a81203f03ccb7f684db373518b4"")] +[assembly: InternalsVisibleTo(""DynamicProxyGenAssembly2, PublicKey=0024000004800000940000000602000000240000525341310004000001000100c547cac37abd99c8db225ef2f6c8a3602f3b3606cc9891605d02baa56104f4cfc0734aa39b93bf7852f7d9266654753cc297e7d2edfe0bac1cdcf9f717241550e0a7b191195b7667bb4f64bcb8e2121380fd1d9d46ad2d92d2d15605093924cceaf74c4861eff62abf69b9291ed0a340e113be11e6a7d3113e92484cf7045cc7"")] "); - #line 32 "C:\dev\repos\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" + #line 33 "D:\CodeBase\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" } #line default @@ -125,14 +126,14 @@ public override string TransformText() // [assembly: AssemblyVersion(""1.0.*"")] [assembly: AssemblyVersion("""); - #line 55 "C:\dev\repos\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" + #line 56 "D:\CodeBase\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" this.Write(this.ToStringHelper.ToStringWithCulture(this.Config.ServiceVersion)); #line default #line hidden this.Write("\")]\r\n[assembly: AssemblyFileVersion(\""); - #line 56 "C:\dev\repos\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" + #line 57 "D:\CodeBase\aws-sdk-net\generator\ServiceClientGeneratorLib\Generators\SourceFiles\AssemblyInfo.tt" this.Write(this.ToStringHelper.ToStringWithCulture(this.Config.ServiceFileVersion)); #line default diff --git a/generator/ServiceClientGeneratorLib/Generators/SourceFiles/AssemblyInfo.tt b/generator/ServiceClientGeneratorLib/Generators/SourceFiles/AssemblyInfo.tt index ab2cf5d21a23..31dceb950beb 100644 --- a/generator/ServiceClientGeneratorLib/Generators/SourceFiles/AssemblyInfo.tt +++ b/generator/ServiceClientGeneratorLib/Generators/SourceFiles/AssemblyInfo.tt @@ -29,6 +29,7 @@ using System.Runtime.CompilerServices; <# if (this.Config.AssemblyTitle=="AWSSDK.S3") { #> [assembly: InternalsVisibleTo("AWSSDK.UnitTests.S3.NetFramework, PublicKey=0024000004800000940000000602000000240000525341310004000001000100db5f59f098d27276c7833875a6263a3cc74ab17ba9a9df0b52aedbe7252745db7274d5271fd79c1f08f668ecfa8eaab5626fa76adc811d3c8fc55859b0d09d3bc0a84eecd0ba891f2b8a2fc55141cdcc37c2053d53491e650a479967c3622762977900eddbf1252ed08a2413f00a28f3a0752a81203f03ccb7f684db373518b4")] [assembly: InternalsVisibleTo("AWSSDK.UnitTests.NetFramework, 
PublicKey=0024000004800000940000000602000000240000525341310004000001000100db5f59f098d27276c7833875a6263a3cc74ab17ba9a9df0b52aedbe7252745db7274d5271fd79c1f08f668ecfa8eaab5626fa76adc811d3c8fc55859b0d09d3bc0a84eecd0ba891f2b8a2fc55141cdcc37c2053d53491e650a479967c3622762977900eddbf1252ed08a2413f00a28f3a0752a81203f03ccb7f684db373518b4")] +[assembly: InternalsVisibleTo("DynamicProxyGenAssembly2, PublicKey=0024000004800000940000000602000000240000525341310004000001000100c547cac37abd99c8db225ef2f6c8a3602f3b3606cc9891605d02baa56104f4cfc0734aa39b93bf7852f7d9266654753cc297e7d2edfe0bac1cdcf9f717241550e0a7b191195b7667bb4f64bcb8e2121380fd1d9d46ad2d92d2d15605093924cceaf74c4861eff62abf69b9291ed0a340e113be11e6a7d3113e92484cf7045cc7")] <# } #> [assembly: AssemblyConfiguration("")] [assembly: AssemblyProduct("Amazon Web Services SDK for .NET")] diff --git a/sdk/src/Services/S3/Custom/Model/GetObjectResponse.cs b/sdk/src/Services/S3/Custom/Model/GetObjectResponse.cs index bae8fc4147b5..a4155565b9fe 100644 --- a/sdk/src/Services/S3/Custom/Model/GetObjectResponse.cs +++ b/sdk/src/Services/S3/Custom/Model/GetObjectResponse.cs @@ -25,6 +25,7 @@ using Amazon.S3.Model.Internal.MarshallTransformations; using Amazon.S3; using Amazon.Runtime.Internal; +using Amazon.S3.Transfer; namespace Amazon.S3.Model { @@ -68,7 +69,6 @@ public partial class GetObjectResponse : StreamResponse private string _checksumSHA1; private string _checksumSHA256; private ChecksumType _checksumType; - private string _contentLanguage; /// /// The date and time at which the object is no longer cacheable. @@ -174,14 +174,8 @@ internal bool IsSetContentRange() /// public string ContentLanguage { - get { return this._contentLanguage; } - set { this._contentLanguage = value; } - } - - // Check to see if ContentLanguage property is set - internal bool IsSetContentLanguage() - { - return this._contentLanguage != null; + get { return this.Headers.ContentLanguage; } + set { this.Headers.ContentLanguage = value; } } /// @@ -909,6 +903,59 @@ private void ValidateWrittenStreamSize(long bytesWritten) } #if BCL || NETSTANDARD + /// + /// Copies data from ResponseStream to destination stream with progress tracking and validation. + /// Internal method to enable reuse across different download scenarios. 
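// [Editor's note - illustrative, not part of the original diff] The refactored
// WriteResponseStreamToFileAsync later in this file delegates to this helper, as the diff below shows:
//   await WriteResponseStreamAsync(downloadStream, filePath, S3Constants.DefaultBufferSize, cancellationToken, validateSize: true);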
+ /// + /// Stream to write data to + /// File path for progress event reporting (can be null) + /// Buffer size for reading/writing operations + /// Cancellation token + /// Whether to validate copied bytes match ContentLength + internal async System.Threading.Tasks.Task WriteResponseStreamAsync( + Stream destinationStream, + string filePath, + int bufferSize, + System.Threading.CancellationToken cancellationToken, + bool validateSize = true) + { + long current = 0; +#if NETSTANDARD + Stream stream = this.ResponseStream; +#else + Stream stream = new BufferedStream(this.ResponseStream); +#endif + byte[] buffer = new byte[bufferSize]; + int bytesRead = 0; + long totalIncrementTransferred = 0; + + while ((bytesRead = await stream.ReadAsync(buffer, 0, buffer.Length, cancellationToken) + .ConfigureAwait(continueOnCapturedContext: false)) > 0) + { + cancellationToken.ThrowIfCancellationRequested(); + + await destinationStream.WriteAsync(buffer, 0, bytesRead, cancellationToken) + .ConfigureAwait(continueOnCapturedContext: false); + current += bytesRead; + totalIncrementTransferred += bytesRead; + + if (totalIncrementTransferred >= AWSSDKUtils.DefaultProgressUpdateInterval) + { + this.OnRaiseProgressEvent(filePath, totalIncrementTransferred, current, this.ContentLength, completed: false); + totalIncrementTransferred = 0; + } + } + + if (validateSize) + { + ValidateWrittenStreamSize(current); + } + + // Encrypted objects may have size smaller than the total amount of data transferred due to padding. + // Instead of changing the file size or the total downloaded size, pass a flag that indicates transfer is complete. + this.OnRaiseProgressEvent(filePath, totalIncrementTransferred, current, this.ContentLength, completed: true); + } + /// /// Writes the content of the ResponseStream a file indicated by the filePath argument. /// @@ -929,37 +976,8 @@ public async System.Threading.Tasks.Task WriteResponseStreamToFileAsync(string f try { - long current = 0; -#if NETSTANDARD - Stream stream = this.ResponseStream; -#else - Stream stream = new BufferedStream(this.ResponseStream); -#endif - byte[] buffer = new byte[S3Constants.DefaultBufferSize]; - int bytesRead = 0; - long totalIncrementTransferred = 0; - while ((bytesRead = await stream.ReadAsync(buffer, 0, buffer.Length, cancellationToken) - .ConfigureAwait(continueOnCapturedContext: false)) > 0) - { - cancellationToken.ThrowIfCancellationRequested(); - - await downloadStream.WriteAsync(buffer, 0, bytesRead, cancellationToken) - .ConfigureAwait(continueOnCapturedContext: false); - current += bytesRead; - totalIncrementTransferred += bytesRead; - - if (totalIncrementTransferred >= AWSSDKUtils.DefaultProgressUpdateInterval) - { - this.OnRaiseProgressEvent(filePath, totalIncrementTransferred, current, this.ContentLength, completed:false); - totalIncrementTransferred = 0; - } - } - - ValidateWrittenStreamSize(current); - - // Encrypted objects may have size smaller than the total amount of data trasnfered due to padding. - // Instead of changing the file size or the total downloaded size, pass a flag that indicate that the transfer is complete. 
- this.OnRaiseProgressEvent(filePath, totalIncrementTransferred, current, this.ContentLength, completed:true); + await WriteResponseStreamAsync(downloadStream, filePath, S3Constants.DefaultBufferSize, cancellationToken, validateSize: true) + .ConfigureAwait(continueOnCapturedContext: false); } finally { @@ -1042,5 +1060,10 @@ internal WriteObjectProgressArgs(string bucketName, string key, string filePath, /// True if writing is complete /// public bool IsCompleted { get; private set; } + + /// + /// The original TransferUtilityDownloadRequest created by the user. + /// + public TransferUtilityDownloadRequest Request { get; internal set; } } } diff --git a/sdk/src/Services/S3/Custom/Model/Internal/MarshallTransformations/GetObjectResponseUnmarshaller.cs b/sdk/src/Services/S3/Custom/Model/Internal/MarshallTransformations/GetObjectResponseUnmarshaller.cs index 410f93fd4342..2e0b4a0197b9 100644 --- a/sdk/src/Services/S3/Custom/Model/Internal/MarshallTransformations/GetObjectResponseUnmarshaller.cs +++ b/sdk/src/Services/S3/Custom/Model/Internal/MarshallTransformations/GetObjectResponseUnmarshaller.cs @@ -86,7 +86,7 @@ private static void UnmarshallResult(XmlUnmarshallerContext context,GetObjectRes if (responseData.IsHeaderPresent("Content-Encoding")) response.Headers.ContentEncoding = S3Transforms.ToString(responseData.GetHeaderValue("Content-Encoding")); if (responseData.IsHeaderPresent("Content-Language")) - response.ContentLanguage = S3Transforms.ToString(responseData.GetHeaderValue("Content-Language")); + response.Headers.ContentLanguage = S3Transforms.ToString(responseData.GetHeaderValue("Content-Language")); if (responseData.IsHeaderPresent("Content-Length")) response.Headers.ContentLength = long.Parse(responseData.GetHeaderValue("Content-Length"), CultureInfo.InvariantCulture); if (responseData.IsHeaderPresent("x-amz-object-lock-legal-hold")) diff --git a/sdk/src/Services/S3/Custom/Transfer/BaseDownloadRequest.cs b/sdk/src/Services/S3/Custom/Transfer/BaseDownloadRequest.cs index 84bc08c29225..fb85827cfafd 100644 --- a/sdk/src/Services/S3/Custom/Transfer/BaseDownloadRequest.cs +++ b/sdk/src/Services/S3/Custom/Transfer/BaseDownloadRequest.cs @@ -28,6 +28,22 @@ namespace Amazon.S3.Transfer { + /// + /// Specifies the strategy for multipart downloads + /// + public enum MultipartDownloadType + { + /// + /// Use part-based downloads with original upload part boundaries + /// + PART, + + /// + /// Use range-based downloads with configurable part sizes + /// + RANGE + } + /// /// The base class for requests that return Amazon S3 objects. /// @@ -50,6 +66,8 @@ public abstract class BaseDownloadRequest private string ifMatch; private string ifNoneMatch; private ResponseHeaderOverrides responseHeaders; + private long? partSize; + private MultipartDownloadType multipartDownloadType = MultipartDownloadType.PART; /// /// Gets or sets the name of the bucket. @@ -330,5 +348,45 @@ public ResponseHeaderOverrides ResponseHeaderOverrides this.responseHeaders = value; } } + + /// + /// Gets or sets the part size of the download in bytes. + /// The downloaded file will be divided into + /// parts of the specified size, with each part + /// downloaded from Amazon S3 individually. + /// This is used when MultipartDownloadType is set to RANGE. + /// + /// + /// The part size of the download. + /// + public long PartSize + { + get { return this.partSize.GetValueOrDefault(); } + set { this.partSize = value; } + } + + /// + /// Checks if PartSize property is set. + /// + /// true if PartSize property is set.
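// [Editor's note - hypothetical usage sketch, not part of the original diff; the request type and
// property names come from this diff and the existing TransferUtility API]
//   var request = new TransferUtilityDownloadRequest { BucketName = "amzn-s3-demo-bucket", Key = "large-object.bin", FilePath = @"C:\temp\large-object.bin" };
//   request.MultipartDownloadType = MultipartDownloadType.RANGE; // default is PART
//   request.PartSize = 16 * 1024 * 1024;                         // 16 MB ranged parts; ignored when PART is used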
+ internal bool IsSetPartSize() + { + return this.partSize.HasValue; + } + + /// + /// Gets or sets the type of multipart download to use. + /// PART: Uses part GET with original part sizes from upload (ignores PartSize) + /// RANGE: Uses ranged GET with PartSize to determine ranges + /// Default is PART + /// + /// + /// The multipart download type. + /// + public MultipartDownloadType MultipartDownloadType + { + get { return this.multipartDownloadType; } + set { this.multipartDownloadType = value; } + } } -} \ No newline at end of file +} diff --git a/sdk/src/Services/S3/Custom/Transfer/DirectoryResult.cs b/sdk/src/Services/S3/Custom/Transfer/DirectoryResult.cs new file mode 100644 index 000000000000..5329b21e07f2 --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/DirectoryResult.cs @@ -0,0 +1,43 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ + +namespace Amazon.S3.Transfer +{ + /// + /// Overall outcome of a directory operation. + /// + public enum DirectoryResult + { + /// + /// All objects processed successfully. + /// + Success, + /// + /// Some objects succeeded and some failed. + /// + PartialSuccess, + /// + /// All attempted objects failed. + /// + Failure + } +} \ No newline at end of file diff --git a/sdk/src/Services/S3/Custom/Transfer/FailurePolicy.cs b/sdk/src/Services/S3/Custom/Transfer/FailurePolicy.cs new file mode 100644 index 000000000000..5bf16b176a75 --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/FailurePolicy.cs @@ -0,0 +1,40 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ + +namespace Amazon.S3.Transfer +{ + /// + /// Specifies the policy to apply when a failure occurs during a directory transfer operation. + /// + public enum FailurePolicy + { + /// + /// Abort the directory transfer operation on failure. + /// + AbortOnFailure, + + /// + /// Continue the directory transfer operation despite failures. 
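// [Editor's note - hypothetical usage sketch, not part of the original diff] Per the .DevConfigs
// entries above, directory requests gain a FailurePolicy property (default AbortOnFailure):
//   var request = new TransferUtilityDownloadDirectoryRequest { BucketName = "amzn-s3-demo-bucket", S3Directory = "photos/", LocalDirectory = @"C:\photos" };
//   request.FailurePolicy = FailurePolicy.ContinueOnFailure; // keep going past individual object failures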
/// + ContinueOnFailure + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/AbortMultipartUploadsCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/AbortMultipartUploadsCommand.cs index a0313c75b6c0..9bf25dc5421d 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/AbortMultipartUploadsCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/AbortMultipartUploadsCommand.cs @@ -28,7 +28,7 @@ namespace Amazon.S3.Transfer.Internal { - internal partial class AbortMultipartUploadsCommand : BaseCommand + internal partial class AbortMultipartUploadsCommand : BaseCommand { IAmazonS3 _s3Client; TransferUtilityAbortMultipartUploadRequest _request; diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/AbortOnFailurePolicy.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/AbortOnFailurePolicy.cs new file mode 100644 index 000000000000..258a2cc57301 --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/AbortOnFailurePolicy.cs @@ -0,0 +1,72 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ + +using System; +using System.Threading; +using System.Threading.Tasks; + +namespace Amazon.S3.Transfer.Internal +{ + /// + /// Failure policy that cancels all related operations and rethrows the exception when + /// an action fails. + /// + /// + /// Use this policy when any single failure should abort the entire higher-level operation. + /// When an action executed under this policy throws, the policy will cancel + /// the provided CancellationTokenSource, invoke an optional failure callback, + /// and then rethrow the exception so the caller can observe the original failure. + /// + internal class AbortOnFailurePolicy : IFailurePolicy + { + /// + /// Executes the provided asynchronous action under the abort-on-failure policy. + /// + /// An asynchronous delegate that performs the work to execute under the policy. + /// An optional callback that will be invoked with the exception if the action fails. + /// A CancellationTokenSource that will be canceled by this policy to signal termination + /// of related work when a failure occurs. + /// + /// A Task<bool> that completes with true when the action completes successfully. + /// If the action fails, this method cancels the CancellationTokenSource, invokes onFailure + /// if provided, and rethrows the original exception; it does not return false.
/// + public async Task<bool> ExecuteAsync(Func<Task> action, Action<Exception> onFailure, CancellationTokenSource cancellationTokenSource) + { + try + { + await action().ConfigureAwait(false); + + return true; + } + catch (Exception ex) + { + // Cancel all pending operations before propagating the exception + cancellationTokenSource?.Cancel(); + + onFailure?.Invoke(ex); + + throw; + } + } + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/AtomicFileHandler.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/AtomicFileHandler.cs new file mode 100644 index 000000000000..70756ff10be3 --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/AtomicFileHandler.cs @@ -0,0 +1,198 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ +using System; +using System.IO; +using System.Security.Cryptography; + +namespace Amazon.S3.Transfer.Internal +{ + /// + /// Handles atomic file operations for multipart downloads using SEP-compliant temporary file pattern. + /// Creates .s3tmp.{uniqueId} files and ensures atomic commits to prevent partial file corruption. + /// + internal class AtomicFileHandler : IDisposable + { + private string _tempFilePath; + private bool _disposed = false; + private static readonly object _fileLock = new object(); + + /// + /// Creates a temporary file with unique identifier for atomic operations. + /// Pattern: {destinationPath}.s3tmp.{8-char-unique-id} + /// Uses FileMode.CreateNew for atomic file creation (no race condition). + /// + public string CreateTemporaryFile(string destinationPath) + { + if (string.IsNullOrEmpty(destinationPath)) + throw new ArgumentException("Destination path cannot be null or empty", nameof(destinationPath)); + + // Create directory if it doesn't exist (Directory.CreateDirectory is idempotent) + var directory = Path.GetDirectoryName(destinationPath); + if (!string.IsNullOrEmpty(directory)) + { + Directory.CreateDirectory(directory); + } + + // Try up to 100 times to create unique file atomically + for (int attempt = 0; attempt < 100; attempt++) + { + var uniqueId = GenerateRandomId(8); + var tempPath = $"{destinationPath}.s3tmp.{uniqueId}"; + + try + { + // FileMode.CreateNew fails atomically if file exists - no race condition + using (var stream = new FileStream(tempPath, FileMode.CreateNew, FileAccess.Write)) + { + // File created successfully - immediately close it + } + + _tempFilePath = tempPath; + return tempPath; + } + catch (IOException) + { + lock (_fileLock) + { + // If the file now exists when we check immediately after the exception, + // it means another process or thread beat us to the creation (race condition).
+ if (File.Exists(tempPath)) + { + // File exists, try again with new ID + continue; + } + else + { + // The file does *not* exist, which means the IOException was caused by + // something else entirely (e.g., permissions, disk full, network error). + throw; // Re-throw the original exception as it was an unexpected error. + } + } + } + } + + throw new InvalidOperationException("Unable to generate unique temporary file name after 100 attempts"); + } + + /// + /// Atomically commits the temporary file to the final destination. + /// Uses File.Replace for atomic replacement when destination exists, or File.Move for new files. + /// This prevents data loss if the process crashes during commit. + /// + public void CommitFile(string tempPath, string destinationPath) + { + if (string.IsNullOrEmpty(tempPath)) + throw new ArgumentException("Temp path cannot be null or empty", nameof(tempPath)); + if (string.IsNullOrEmpty(destinationPath)) + throw new ArgumentException("Destination path cannot be null or empty", nameof(destinationPath)); + + if (!File.Exists(tempPath)) + throw new FileNotFoundException($"Temporary file not found: {tempPath}"); + + try + { + // Use File.Replace for atomic replacement when overwriting existing file + // This prevents data loss if process crashes between delete and move operations + // File.Replace is atomic on Windows (ReplaceFile API) and Unix (rename syscall) + if (File.Exists(destinationPath)) + { + File.Replace(tempPath, destinationPath, null); + } + else + { + // For new files, File.Move is sufficient and atomic on same volume + File.Move(tempPath, destinationPath); + } + + if (_tempFilePath == tempPath) + _tempFilePath = null; // Successfully committed + } + catch (Exception ex) + { + throw new InvalidOperationException($"Failed to commit temporary file {tempPath} to {destinationPath}", ex); + } + } + + /// + /// Cleans up temporary file in case of failure or cancellation. + /// Safe to call multiple times - File.Delete() is idempotent (no-op if file doesn't exist). + /// + public void CleanupOnFailure(string tempPath = null) + { + var pathToClean = string.IsNullOrEmpty(tempPath) ? _tempFilePath : tempPath; + + if (string.IsNullOrEmpty(pathToClean)) + return; + + try + { + // File.Delete() is idempotent - doesn't throw if file doesn't exist + File.Delete(pathToClean); + + if (_tempFilePath == pathToClean) + _tempFilePath = null; + } + catch (IOException) + { + // Log warning but don't throw - cleanup is best effort + // In production, this would use proper logging infrastructure + } + catch (UnauthorizedAccessException) + { + // Log warning but don't throw - cleanup is best effort + } + } + + /// + /// Generates a cryptographically secure random identifier of specified length. + /// Uses base32 encoding to avoid filesystem-problematic characters. 
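// [Editor's note - illustrative, not part of the original diff] With length 8 this yields temp names
// such as "report.csv.s3tmp.K3QZ7MAA". Because 256 % 32 == 0, the modulo mapping below selects each of
// the 32 base32 characters with equal probability, so the generated IDs are unbiased.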
/// + private string GenerateRandomId(int length) + { + const string base32Chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567"; // RFC 4648 base32 + + using (var rng = RandomNumberGenerator.Create()) + { + var bytes = new byte[length]; + rng.GetBytes(bytes); + + var result = new char[length]; + for (int i = 0; i < length; i++) + { + result[i] = base32Chars[bytes[i] % base32Chars.Length]; + } + + return new string(result); + } + } + + public void Dispose() + { + if (!_disposed) + { + // Cleanup any remaining temp file + CleanupOnFailure(); + _disposed = true; + } + } + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/BaseCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/BaseCommand.cs index 428758fa54e6..da7e39c69e3c 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/BaseCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/BaseCommand.cs @@ -30,52 +30,19 @@ namespace Amazon.S3.Transfer.Internal { - internal abstract partial class BaseCommand + /// + /// Generic base command that returns a typed response + /// + /// Type of response returned by the command + internal abstract partial class BaseCommand<TResponse> where TResponse : class { - public virtual object Return - { - get { return null; } - } - internal GetObjectRequest ConvertToGetObjectRequest(BaseDownloadRequest request) { - GetObjectRequest getRequest = new GetObjectRequest() - { - BucketName = request.BucketName, - Key = request.Key, - VersionId = request.VersionId - }; - ((Amazon.Runtime.Internal.IAmazonWebServiceRequest)getRequest).AddBeforeRequestHandler(this.RequestEventHandler); - - if (request.IsSetModifiedSinceDate()) - { - getRequest.ModifiedSinceDate = request.ModifiedSinceDate; - } - if (request.IsSetUnmodifiedSinceDate()) - { - getRequest.UnmodifiedSinceDate = request.UnmodifiedSinceDate; - } - - getRequest.ServerSideEncryptionCustomerMethod = request.ServerSideEncryptionCustomerMethod; - getRequest.ServerSideEncryptionCustomerProvidedKey = request.ServerSideEncryptionCustomerProvidedKey; - getRequest.ServerSideEncryptionCustomerProvidedKeyMD5 = request.ServerSideEncryptionCustomerProvidedKeyMD5; - getRequest.ChecksumMode = request.ChecksumMode; - getRequest.RequestPayer = request.RequestPayer; - - if (request.IsSetExpectedBucketOwner()) - { - getRequest.ExpectedBucketOwner = request.ExpectedBucketOwner; - } - if (request.IsSetIfMatch()) - { - getRequest.EtagToMatch = request.IfMatch; - } - if (request.IsSetIfNoneMatch()) - { - getRequest.EtagToNotMatch = request.IfNoneMatch; - } + // Use centralized request mapping + GetObjectRequest getRequest = RequestMapper.MapToGetObjectRequest(request); - getRequest.ResponseHeaderOverrides = request.ResponseHeaderOverrides; + // Add command-specific event handler + ((Amazon.Runtime.Internal.IAmazonWebServiceRequest)getRequest).AddBeforeRequestHandler(this.RequestEventHandler); return getRequest; } diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedDataSource.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedDataSource.cs new file mode 100644 index 000000000000..db6ae3c8d00c --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedDataSource.cs @@ -0,0 +1,156 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License.
A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ +using Amazon.Runtime.Internal.Util; +using System; +using System.Diagnostics.CodeAnalysis; +using System.Threading; +using System.Threading.Tasks; + +namespace Amazon.S3.Transfer.Internal +{ + /// + /// ArrayPool-based buffered data source that reads from pre-buffered part data. + /// Manages ArrayPool lifecycle and provides efficient buffer-to-buffer copying. + /// + internal class BufferedDataSource : IPartDataSource + { + private readonly StreamPartBuffer _partBuffer; + private bool _disposed = false; + + #region Logger + + private Logger Logger + { + get + { + return Logger.GetLogger(typeof(TransferUtility)); + } + } + + #endregion + + /// + public int PartNumber => _partBuffer.PartNumber; + + /// + public bool IsComplete => _partBuffer.RemainingBytes == 0; + + /// + /// Initializes a new instance of the BufferedDataSource class. + /// + /// The StreamPartBuffer containing the buffered part data. + /// Thrown when partBuffer is null. + public BufferedDataSource(StreamPartBuffer partBuffer) + { + _partBuffer = partBuffer ?? throw new ArgumentNullException(nameof(partBuffer)); + + Logger.DebugFormat("BufferedDataSource: Created for part {0} (BufferLength={1}, RemainingBytes={2})", + _partBuffer.PartNumber, _partBuffer.Length, _partBuffer.RemainingBytes); + } + + /// + public Task<int> ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) + { + ThrowIfDisposed(); + + try + { + if (buffer == null) + throw new ArgumentNullException(nameof(buffer)); + if (offset < 0) + throw new ArgumentOutOfRangeException(nameof(offset), "Offset must be non-negative"); + if (count < 0) + throw new ArgumentOutOfRangeException(nameof(count), "Count must be non-negative"); + if (offset + count > buffer.Length) + throw new ArgumentException("Offset and count exceed buffer bounds"); + + if (_partBuffer.RemainingBytes == 0) + { + Logger.DebugFormat("BufferedDataSource: [Part {0}] Reached end of buffer (RemainingBytes=0)", _partBuffer.PartNumber); + return Task.FromResult(0); // End of part + } + + // Calculate bytes to copy from buffered part + var availableBytes = _partBuffer.RemainingBytes; + var bytesToRead = Math.Min(count, availableBytes); + + Logger.DebugFormat("BufferedDataSource: [Part {0}] Reading {1} bytes (Requested={2}, Available={3}, CurrentPosition={4})", + _partBuffer.PartNumber, bytesToRead, count, availableBytes, _partBuffer.CurrentPosition); + + Buffer.BlockCopy( + _partBuffer.ArrayPoolBuffer, // Source: ArrayPool buffer + _partBuffer.CurrentPosition, // Source offset + buffer, // Destination: user buffer + offset, // Destination offset + bytesToRead // Bytes to copy + ); + + // Update position in the part buffer + _partBuffer.CurrentPosition += bytesToRead; + + Logger.DebugFormat("BufferedDataSource: [Part {0}] Read complete (BytesRead={1}, NewPosition={2}, RemainingBytes={3}, IsComplete={4})", + _partBuffer.PartNumber, bytesToRead, _partBuffer.CurrentPosition, _partBuffer.RemainingBytes, IsComplete); + + return Task.FromResult(bytesToRead);
+ } + catch (Exception ex) + { + Logger.Error(ex, "BufferedDataSource: [Part {0}] Error during read: {1}", _partBuffer.PartNumber, ex.Message); + + // On any error during read (including validation), mark the buffer as consumed to prevent further reads + _partBuffer.CurrentPosition = _partBuffer.Length; + throw; + } + } + + private void ThrowIfDisposed() + { + if (_disposed) + throw new ObjectDisposedException(nameof(BufferedDataSource)); + } + + /// + [SuppressMessage("Design", "CA1031:Do not catch general exception types", Justification = "Dispose methods should not throw exceptions")] + public void Dispose() + { + if (!_disposed) + { + try + { + Logger.DebugFormat("BufferedDataSource: [Part {0}] Disposing (Returning buffer to ArrayPool)", _partBuffer.PartNumber); + + // Dispose the underlying StreamPartBuffer, which returns ArrayPool buffer to pool + _partBuffer?.Dispose(); + } + catch (Exception ex) + { + Logger.Error(ex, "BufferedDataSource: [Part {0}] Error during disposal: {1}", _partBuffer.PartNumber, ex.Message); + + // Suppressing CA1031: Dispose methods should not throw exceptions + // Continue disposal process silently on any errors + } + + _disposed = true; + } + } + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedDownloadConfiguration.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedDownloadConfiguration.cs new file mode 100644 index 000000000000..3d0f0b2baf1e --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedDownloadConfiguration.cs @@ -0,0 +1,66 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ +using System; + +namespace Amazon.S3.Transfer.Internal +{ + /// + /// Configuration settings for buffered multipart downloads to streams. + /// Extends base coordinator settings with buffer-specific parameters. + /// + internal class BufferedDownloadConfiguration : DownloadManagerConfiguration + { + /// + /// Maximum parts to keep in memory simultaneously. + /// + public int MaxInMemoryParts { get; set; } + + /// + /// Buffer size for I/O operations. + /// + public int BufferSize { get; set; } + + /// + /// Creates a BufferedDownloadConfiguration with the specified configuration values. + /// + /// Maximum concurrent HTTP requests for downloading parts. + /// Maximum number of parts to keep in memory simultaneously. + /// Buffer size used for optimal I/O operations. + /// Target size for each part in bytes. + /// Thrown when any parameter is less than or equal to 0. 
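// [Editor's note - hypothetical values, not part of the original diff] Mirrors the Create factory
// in BufferedMultipartStream below, which passes TransferUtilityConfig.ConcurrentServiceRequests,
// TransferUtilityOpenStreamRequest.MaxInMemoryParts, the client config's BufferSize, and the target part size:
//   var config = new BufferedDownloadConfiguration(concurrentServiceRequests: 10, maxInMemoryParts: 4, bufferSize: 8192, targetPartSizeBytes: 8 * 1024 * 1024);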
+ public BufferedDownloadConfiguration( + int concurrentServiceRequests, + int maxInMemoryParts, + int bufferSize, + long targetPartSizeBytes) + : base(concurrentServiceRequests, targetPartSizeBytes) + { + if (maxInMemoryParts <= 0) + throw new ArgumentOutOfRangeException(nameof(maxInMemoryParts), "Must be greater than 0"); + if (bufferSize <= 0) + throw new ArgumentOutOfRangeException(nameof(bufferSize), "Must be greater than 0"); + + MaxInMemoryParts = maxInMemoryParts; + BufferSize = bufferSize; + } + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedMultipartStream.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedMultipartStream.cs new file mode 100644 index 000000000000..fa3616a34f3d --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedMultipartStream.cs @@ -0,0 +1,314 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ +using System; +using System.Diagnostics.CodeAnalysis; +using System.IO; +using System.Threading; +using System.Threading.Tasks; +using Amazon.Runtime; +using Amazon.Runtime.Internal.Util; +using Amazon.S3.Util; + +namespace Amazon.S3.Transfer.Internal +{ + /// + /// Stream implementation for SEP-compliant multipart downloads to streams. + /// Uses modular architecture with dependency injection for improved maintainability and testability. + /// Supports both single-part and multipart downloads with optimal performance for each scenario. + /// + internal class BufferedMultipartStream : Stream + { + private readonly IDownloadManager _downloadCoordinator; + private readonly IPartBufferManager _partBufferManager; + private readonly BufferedDownloadConfiguration _config; + + private bool _initialized = false; + private bool _disposed = false; + private DownloadResult _discoveryResult; + private long _totalBytesRead = 0; + + private readonly Logger _logger = Logger.GetLogger(typeof(BufferedMultipartStream)); + + /// + /// Gets the containing metadata from the initial GetObject response. + /// Available after completes successfully. + /// + public DownloadResult DiscoveryResult => _discoveryResult; + + /// + /// Creates a new with dependency injection. + /// + /// that coordinates download discovery and orchestration. + /// that manages part buffer lifecycle and synchronization. + /// with settings for the stream. + public BufferedMultipartStream(IDownloadManager downloadCoordinator, IPartBufferManager partBufferManager, BufferedDownloadConfiguration config) + { + _downloadCoordinator = downloadCoordinator ?? throw new ArgumentNullException(nameof(downloadCoordinator)); + _partBufferManager = partBufferManager ?? throw new ArgumentNullException(nameof(partBufferManager)); + _config = config ?? 
throw new ArgumentNullException(nameof(config)); + } + + /// + /// Factory method to create with default dependencies. + /// + /// client for making requests. + /// with stream request parameters. + /// with transfer utility configuration. + /// Optional for user agent tracking. + /// A new instance. + public static BufferedMultipartStream Create(IAmazonS3 s3Client, TransferUtilityOpenStreamRequest request, TransferUtilityConfig transferConfig, RequestEventHandler requestEventHandler = null) + { + if (s3Client == null) throw new ArgumentNullException(nameof(s3Client)); + if (request == null) throw new ArgumentNullException(nameof(request)); + if (transferConfig == null) throw new ArgumentNullException(nameof(transferConfig)); + + // Determine target part size from request or use 8MB default + long targetPartSize = request.IsSetPartSize() + ? request.PartSize + : S3Constants.DefaultPartSize; + + var config = new BufferedDownloadConfiguration( + transferConfig.ConcurrentServiceRequests, + request.MaxInMemoryParts, + s3Client.Config.BufferSize, + targetPartSize); + + var partBufferManager = new PartBufferManager(config); + var dataHandler = new BufferedPartDataHandler(partBufferManager, config); + var downloadCoordinator = new MultipartDownloadManager(s3Client, request, config, dataHandler, requestEventHandler); + + return new BufferedMultipartStream(downloadCoordinator, partBufferManager, config); + } + + /// + /// Initialize the stream by discovering download strategy and setting up appropriate handlers. + /// + /// Cancellation token for the initialization operation. + public async Task InitializeAsync(CancellationToken cancellationToken) + { + ThrowIfDisposed(); + + if (_initialized) + throw new InvalidOperationException("Stream has already been initialized"); + + _logger.DebugFormat("BufferedMultipartStream: Starting initialization"); + + // Start unified download operation (discovers strategy and starts downloads) + _discoveryResult = await _downloadCoordinator.StartDownloadAsync(null, cancellationToken) + .ConfigureAwait(false); + + _logger.DebugFormat("BufferedMultipartStream: Download started - ObjectSize={0}, TotalParts={1}, IsSinglePart={2}", + _discoveryResult.ObjectSize, + _discoveryResult.TotalParts, + _discoveryResult.IsSinglePart); + + _initialized = true; + _logger.DebugFormat("BufferedMultipartStream: Initialization completed successfully"); + } + + /// + /// Asynchronously reads a sequence of bytes from the stream and advances the position within the stream by the number of bytes read. + /// + /// The buffer to read data into. + /// The byte offset in at which to begin storing data. + /// The maximum number of bytes to read. + /// A token to cancel the read operation. + /// + /// A task that represents the asynchronous read operation. The value of the task's result contains the total number of bytes read into the buffer. + /// This can be less than the number of bytes requested if that number of bytes are not currently available, or zero if the end of the stream is reached. + /// + /// The stream has been disposed. + /// The stream has not been initialized. Call first. + /// is null. + /// or is negative. + /// The sum of and is greater than the buffer length. + /// + /// This method reads data from the underlying which coordinates sequential reading + /// from buffered multipart download data. The method automatically handles reading across part boundaries to fill + /// the provided buffer when possible, matching standard behavior. 
+ /// + public override async Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) + { + ThrowIfDisposed(); + + if (!_initialized) + throw new InvalidOperationException("Stream must be initialized before reading. Call InitializeAsync first."); + + if (buffer == null) + throw new ArgumentNullException(nameof(buffer)); + if (offset < 0) + throw new ArgumentOutOfRangeException(nameof(offset), "Offset must be non-negative"); + if (count < 0) + throw new ArgumentOutOfRangeException(nameof(count), "Count must be non-negative"); + if (offset + count > buffer.Length) + throw new ArgumentException("Offset and count exceed buffer bounds"); + + var currentPosition = Interlocked.Read(ref _totalBytesRead); + _logger.DebugFormat("BufferedMultipartStream: ReadAsync called - Position={0}, RequestedBytes={1}", + currentPosition, count); + + var bytesRead = await _partBufferManager.ReadAsync(buffer, offset, count, cancellationToken) + .ConfigureAwait(false); + + // Track total bytes read for Position property + if (bytesRead > 0) + { + Interlocked.Add(ref _totalBytesRead, bytesRead); + _logger.DebugFormat("BufferedMultipartStream: ReadAsync completed - BytesRead={0}, NewPosition={1}", + bytesRead, currentPosition + bytesRead); + } + else + { + _logger.DebugFormat("BufferedMultipartStream: ReadAsync returned EOF (0 bytes)"); + } + + return bytesRead; + } + + + #region Stream Implementation + + public override bool CanRead => true; + public override bool CanSeek => false; + public override bool CanWrite => false; + + public override long Length + { + get + { + if (!_initialized) + throw new InvalidOperationException("Stream must be initialized before accessing Length"); + return _discoveryResult.ObjectSize; + } + } + + public override long Position + { + get + { + if (!_initialized) + throw new InvalidOperationException("Stream must be initialized before accessing Position"); + return Interlocked.Read(ref _totalBytesRead); + } + set => throw new NotSupportedException("Position setter not supported for read-only streams"); + } + + /// + /// Flushes any buffered data to the underlying stream. This is a no-op for read-only streams. + /// + public override void Flush() { } + + /// + /// Asynchronously flushes any buffered data to the underlying stream. This is a no-op for read-only streams. + /// + /// A token to cancel the operation. + /// A completed task. + public override Task FlushAsync(CancellationToken cancellationToken) => Task.CompletedTask; + + /// + /// Synchronously reads a sequence of bytes from the stream and advances the position within the stream by the number of bytes read. + /// + /// The buffer to read data into. + /// The byte offset in at which to begin storing data. + /// The maximum number of bytes to read. + /// + /// The total number of bytes read into the buffer. This can be less than the number of bytes requested if that number of bytes + /// are not currently available, or zero if the end of the stream is reached. + /// + /// + /// This is a synchronous wrapper around . + /// For better performance, prefer using the asynchronous version when possible. 
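// Editor's note: a standalone sketch of the lock-free position tracking used above
// (_totalBytesRead with Interlocked.Add / Interlocked.Read), assuming only the BCL.
using System;
using System.Threading;
using System.Threading.Tasks;

class PositionCounter
{
    private long _totalBytesRead;

    // Interlocked.Add publishes each increment atomically; Interlocked.Read
    // gives a torn-read-safe snapshot of the 64-bit counter even on 32-bit CPUs.
    public void OnBytesRead(int count) => Interlocked.Add(ref _totalBytesRead, count);
    public long Position => Interlocked.Read(ref _totalBytesRead);
}

class PositionCounterDemo
{
    static async Task Main()
    {
        var counter = new PositionCounter();
        var tasks = new Task[4];
        for (int i = 0; i < tasks.Length; i++)
            tasks[i] = Task.Run(() => { for (int j = 0; j < 1000; j++) counter.OnBytesRead(8); });
        await Task.WhenAll(tasks);
        Console.WriteLine(counter.Position); // always 32000
    }
}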
+ /// + public override int Read(byte[] buffer, int offset, int count) + { + return ReadAsync(buffer, offset, count).GetAwaiter().GetResult(); + } + + public override long Seek(long offset, SeekOrigin origin) + { + throw new NotSupportedException("Seek not supported for multipart download streams"); + } + + public override void SetLength(long value) + { + throw new NotSupportedException("SetLength not supported for read-only streams"); + } + + public override void Write(byte[] buffer, int offset, int count) + { + throw new NotSupportedException("Write not supported for read-only streams"); + } + + #endregion + + private void ThrowIfDisposed() + { + if (_disposed) + throw new ObjectDisposedException(nameof(BufferedMultipartStream)); + } + + #region Dispose Pattern + + /// + /// Releases the unmanaged resources used by the and optionally releases the managed resources. + /// + /// + /// true to release both managed and unmanaged resources; false to release only unmanaged resources. + /// + /// + /// This method disposes the underlying and , + /// which in turn cleans up any buffered part data and returns ArrayPool buffers to the pool. + /// It also disposes the InitialResponse from the discovery result, which contains the HTTP connection + /// and network stream that must be explicitly disposed to return the connection to the pool. + /// + [SuppressMessage("Design", "CA1031:Do not catch general exception types", Justification = "Dispose methods should not throw exceptions")] + protected override void Dispose(bool disposing) + { + if (!_disposed && disposing) + { + try + { + // Dispose InitialResponse first (contains HTTP connection and network stream) + // This is critical because GetObjectResponse holds unmanaged resources that + // won't be cleaned up by GC - must be explicitly disposed to return HTTP + // connection to the pool and close network streams + _discoveryResult?.InitialResponse?.Dispose(); + + // Then dispose modular dependencies + _downloadCoordinator?.Dispose(); + _partBufferManager?.Dispose(); + } + catch (Exception) + { + // Suppressing CA1031: Dispose methods should not throw exceptions + // Continue disposal process silently on any errors + } + + _disposed = true; + } + + base.Dispose(disposing); + } + + #endregion + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedPartDataHandler.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedPartDataHandler.cs new file mode 100644 index 000000000000..2bb4cf198eb6 --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/BufferedPartDataHandler.cs @@ -0,0 +1,335 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ +using System; +using System.Buffers; +using System.Threading; +using System.Threading.Tasks; +using System.IO; +using Amazon.Runtime.Internal.Util; +using Amazon.S3.Model; + +namespace Amazon.S3.Transfer.Internal +{ + /// + /// Handles multipart download data with intelligent stream-vs-buffer decision making. + /// Optimizes for sequential part arrival by streaming directly to consumer when possible, + /// while buffering out-of-order parts into memory using . + /// + /// + /// Optimization Strategy: + /// + /// Parts arriving in expected order (matching NextExpectedPartNumber) stream directly to consumer + /// Out-of-order parts buffer into ArrayPool memory for later sequential consumption + /// Best case: All parts in order → zero buffering → pure streaming + /// Worst case: All parts out of order → full buffering (original behavior) + /// + /// + /// + /// Response Ownership: + /// + /// Streaming: StreamingDataSource takes ownership and disposes after reading + /// Buffering: Handler disposes response immediately after buffering completes + /// + /// + internal class BufferedPartDataHandler : IPartDataHandler + { + private readonly IPartBufferManager _partBufferManager; + private readonly BufferedDownloadConfiguration _config; + + private readonly Logger _logger = Logger.GetLogger(typeof(BufferedPartDataHandler)); + + /// + /// Initializes a new instance of the class. + /// + /// The for managing part buffers. + /// The with buffering settings. + /// Thrown when any required parameter is null. + public BufferedPartDataHandler( + IPartBufferManager partBufferManager, + BufferedDownloadConfiguration config) + { + _partBufferManager = partBufferManager ?? throw new ArgumentNullException(nameof(partBufferManager)); + _config = config ?? throw new ArgumentNullException(nameof(config)); + } + + public Task PrepareAsync(DownloadResult discoveryResult, CancellationToken cancellationToken) + { + // No preparation needed for buffered handler - buffers are created on demand + return Task.CompletedTask; + } + + /// + /// + /// + /// Intelligently chooses between streaming and buffering based on part arrival order: + /// + /// + /// If partNumber matches NextExpectedPartNumber: Stream directly (no buffering) + /// Otherwise: Buffer into memory for later sequential consumption + /// + /// Response Ownership: + /// + /// This method takes ownership of the response and is responsible for disposing it in ALL cases, + /// including error scenarios. Callers must NOT dispose the response after calling this method. + /// + /// + public async Task ProcessPartAsync( + int partNumber, + GetObjectResponse response, + CancellationToken cancellationToken) + { + if (partNumber == _partBufferManager.NextExpectedPartNumber) + { + ProcessStreamingPart(partNumber, response); + } + else + { + await ProcessBufferedPartAsync(partNumber, response, cancellationToken).ConfigureAwait(false); + } + } + + /// + /// Processes a part that arrives in expected order by streaming it directly without buffering. + /// Takes ownership of the response and transfers it to the StreamingDataSource. + /// + /// The part number being processed. + /// The GetObjectResponse containing the part data. Ownership is transferred to StreamingDataSource. 
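// Editor's note: the stream-vs-buffer decision above reduces to sequential reassembly:
// a part matching the next expected number is emitted immediately, anything else is
// parked until the gap before it is filled. A minimal sketch with illustrative names
// (SequentialReassembler is not an SDK type):
using System;
using System.Collections.Generic;

class SequentialReassembler
{
    private readonly Dictionary<int, byte[]> _parked = new Dictionary<int, byte[]>();
    private int _nextExpected = 1;

    // Mirrors the handler's decision: an in-order part is emitted directly,
    // an out-of-order part is buffered until its turn comes up.
    public void Accept(int partNumber, byte[] data, Action<int, byte[]> emit)
    {
        if (partNumber != _nextExpected)
        {
            _parked[partNumber] = data;
            return;
        }
        emit(partNumber, data);
        _nextExpected++;
        // Drain any parked parts that are now in order.
        while (_parked.TryGetValue(_nextExpected, out var buffered))
        {
            _parked.Remove(_nextExpected);
            emit(_nextExpected, buffered);
            _nextExpected++;
        }
    }
}

class ReassemblerDemo
{
    static void Main()
    {
        var r = new SequentialReassembler();
        Action<int, byte[]> emit = (n, d) => Console.Write(n + " ");
        r.Accept(2, new byte[1], emit); // parked (out of order)
        r.Accept(1, new byte[1], emit); // prints "1 2 "
        r.Accept(3, new byte[1], emit); // prints "3 "
    }
}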
+ /// + /// This method is called when the part arrives in the expected sequential order, allowing + /// for optimal zero-copy streaming directly to the consumer without buffering into memory. + /// + /// OWNERSHIP TRANSFER: + /// 1. Response is passed to StreamingDataSource constructor (StreamingDataSource takes ownership) + /// 2. StreamingDataSource is added to buffer manager (buffer manager takes ownership) + /// 3. After a successful AddBuffer call, we null out our reference to mark ownership transfer + /// 4. Buffer manager will dispose StreamingDataSource (which disposes response) during cleanup + /// + /// ERROR HANDLING: + /// - If StreamingDataSource constructor fails: We dispose the response (still our responsibility) + /// - If the constructor succeeds but AddBuffer fails: StreamingDataSource.Dispose() handles the response + /// - If AddBuffer succeeds: Buffer manager owns everything and will clean up + /// + private void ProcessStreamingPart( + int partNumber, + GetObjectResponse response) + { + _logger.DebugFormat("BufferedPartDataHandler: [Part {0}] Matches NextExpectedPartNumber - streaming directly without buffering", + partNumber); + + StreamingDataSource streamingDataSource = null; + var ownsResponse = true; // Track if we still own the response + + try + { + // Create a StreamingDataSource that will stream directly from the response + // If successful, StreamingDataSource takes ownership of the response and will dispose it + streamingDataSource = new StreamingDataSource(partNumber, response); + ownsResponse = false; // Ownership transferred to StreamingDataSource + + // Add the streaming data source to the buffer manager + // After this succeeds, the buffer manager owns the data source + _partBufferManager.AddBuffer(streamingDataSource); + + // Mark ownership transfer by nulling our reference + // Once AddBuffer has succeeded we no longer own the data source, so the catch block won't dispose it + streamingDataSource = null; + + _logger.DebugFormat("BufferedPartDataHandler: [Part {0}] StreamingDataSource added to buffer manager", + partNumber); + } + catch (Exception ex) + { + _logger.Error(ex, "BufferedPartDataHandler: [Part {0}] Failed to process streaming part", partNumber); + + // Dispose response if we still own it (constructor failed before taking ownership) + if (ownsResponse) + response?.Dispose(); + + // Dispose StreamingDataSource if we created it but buffer manager doesn't own it yet + // If null, the buffer manager owns it and will handle cleanup + streamingDataSource?.Dispose(); + + throw; + } + } + + /// + /// Processes a part that arrives out of order by buffering it into memory. + /// Takes ownership of the response and disposes it after buffering completes. + /// + /// The part number being processed. + /// The GetObjectResponse containing the part data. This method owns and disposes it. + /// Cancellation token for the operation. + /// + /// This method is called when the part arrives out of the expected sequential order. + /// The part data is buffered into ArrayPool memory for later sequential consumption.
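// Editor's note: the ownership-flag pattern documented above, reduced to a standalone
// sketch with stub types (Resource/Wrapper are illustrative, not SDK classes). The flag
// and the null-out ensure exactly one owner disposes the resource on every path.
using System;

class Resource : IDisposable { public void Dispose() => Console.WriteLine("resource disposed"); }

class Wrapper : IDisposable
{
    private readonly Resource _inner;
    public Wrapper(Resource inner) { _inner = inner; } // takes ownership on success
    public void Dispose() => _inner.Dispose();
}

class OwnershipDemo
{
    static void Process(Resource resource, Action<Wrapper> register)
    {
        Wrapper wrapper = null;
        var ownsResource = true;   // until the wrapper's constructor succeeds
        try
        {
            wrapper = new Wrapper(resource);
            ownsResource = false;  // wrapper now owns the resource
            register(wrapper);     // registry takes ownership of the wrapper
            wrapper = null;        // mark the hand-off so the catch won't dispose it
        }
        catch
        {
            if (ownsResource) resource.Dispose(); // ctor failed before taking ownership
            wrapper?.Dispose();                   // wrapper created but never handed off
            throw;
        }
    }

    static void Main()
    {
        try { Process(new Resource(), w => throw new InvalidOperationException("registry full")); }
        catch (InvalidOperationException) { }
        // prints "resource disposed" exactly once, via Wrapper.Dispose in the catch path
    }
}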
+ /// + /// OWNERSHIP: + /// - Response is read and buffered into StreamPartBuffer + /// - Response is disposed immediately after buffering (no longer needed) + /// - StreamPartBuffer is added to buffer manager (buffer manager takes ownership) + /// - Buffer manager will dispose StreamPartBuffer during cleanup + /// + /// ERROR HANDLING: + /// - Always dispose response in catch block since we own it throughout this method + /// - BufferPartFromResponseAsync handles its own cleanup of StreamPartBuffer on error + /// + private async Task ProcessBufferedPartAsync( + int partNumber, + GetObjectResponse response, + CancellationToken cancellationToken) + { + _logger.DebugFormat("BufferedPartDataHandler: [Part {0}] Out of order (NextExpected={1}) - buffering to memory", + partNumber, _partBufferManager.NextExpectedPartNumber); + + try + { + // Buffer the part from the response stream into memory + var buffer = await BufferPartFromResponseAsync( + partNumber, + response, + cancellationToken).ConfigureAwait(false); + + // Response has been fully read and buffered - dispose it now + response?.Dispose(); + + _logger.DebugFormat("BufferedPartDataHandler: [Part {0}] Buffered {1} bytes into memory", + partNumber, buffer.Length); + + // Add the buffered part to the buffer manager + _partBufferManager.AddBuffer(buffer); + + _logger.DebugFormat("BufferedPartDataHandler: [Part {0}] Added to buffer manager (capacity will be released after consumption)", + partNumber); + } + catch (Exception ex) + { + _logger.Error(ex, "BufferedPartDataHandler: [Part {0}] Failed to process buffered part", partNumber); + + // We own the response throughout this method, so dispose it on error + response?.Dispose(); + + throw; + } + } + + /// + public Task WaitForCapacityAsync(CancellationToken cancellationToken) + { + return _partBufferManager.WaitForBufferSpaceAsync(cancellationToken); + } + + /// + public void ReleaseCapacity() + { + _partBufferManager.ReleaseBufferSpace(); + } + + /// + public void OnDownloadComplete(Exception exception) + { + _partBufferManager.MarkDownloadComplete(exception); + } + + /// + public void Dispose() + { + // _partBufferManager is owned by caller, don't dispose + } + + /// + /// Buffers a part from the GetObjectResponse stream into ArrayPool memory. + /// Used when a part arrives out of order and cannot be streamed directly. + /// + /// The part number being buffered. + /// The GetObjectResponse containing the part data stream. + /// Cancellation token for the operation. + /// A containing the buffered part data. + /// Thrown when buffering fails. The StreamPartBuffer will be disposed automatically. 
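// Editor's note: a sketch of the pooled-buffer technique the method below uses - rent an
// array sized from the known content length, wrap it in a fixed-size writable MemoryStream,
// and return the array to the pool only on failure (on success the caller returns it after
// consumption). Plain BCL; `source` stands in for the response stream.
using System;
using System.Buffers;
using System.IO;
using System.Threading.Tasks;

class PooledBufferDemo
{
    // Copies up to `expected` bytes into a pooled array. The MemoryStream is fixed
    // to the rented segment, so it cannot grow past the expected size.
    static async Task<(byte[] Buffer, int Length)> BufferAsync(Stream source, int expected)
    {
        byte[] rented = ArrayPool<byte>.Shared.Rent(expected);
        try
        {
            using (var ms = new MemoryStream(rented, 0, expected, writable: true))
            {
                await source.CopyToAsync(ms);
                return (rented, (int)ms.Position);
            }
        }
        catch
        {
            ArrayPool<byte>.Shared.Return(rented); // only on failure; caller returns it on success
            throw;
        }
    }

    static async Task Main()
    {
        var (buffer, length) = await BufferAsync(new MemoryStream(new byte[1024]), 1024);
        Console.WriteLine(length); // 1024
        ArrayPool<byte>.Shared.Return(buffer);
    }
}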
+ private async Task BufferPartFromResponseAsync( + int partNumber, + GetObjectResponse response, + CancellationToken cancellationToken) + { + StreamPartBuffer downloadedPart = null; + + try + { + // Use ContentLength to determine exact bytes to read and allocate + long expectedBytes = response.ContentLength; + int initialBufferSize = (int)expectedBytes; + + _logger.DebugFormat("BufferedPartDataHandler: [Part {0}] Allocating buffer of size {1} bytes from ArrayPool", + partNumber, initialBufferSize); + + downloadedPart = StreamPartBuffer.Create(partNumber, initialBufferSize); + + // Get reference to the buffer for writing + var partBuffer = downloadedPart.ArrayPoolBuffer; + + // Create a MemoryStream wrapper around the pooled buffer + // writable: true allows WriteResponseStreamAsync to write to it + // The MemoryStream starts at position 0 and can grow up to initialBufferSize + using (var memoryStream = new MemoryStream(partBuffer, 0, initialBufferSize, writable: true)) + { + _logger.DebugFormat("BufferedPartDataHandler: [Part {0}] Reading response stream into buffer", + partNumber); + + // Use GetObjectResponse's stream copy logic which includes: + // - Progress tracking with events + // - Size validation (ContentLength vs bytes read) + // - Buffered reading with proper chunk sizes + await response.WriteResponseStreamAsync( + memoryStream, + null, // destination identifier (not needed for memory stream) + _config.BufferSize, + cancellationToken, + validateSize: true) + .ConfigureAwait(false); + + int totalRead = (int)memoryStream.Position; + + _logger.DebugFormat("BufferedPartDataHandler: [Part {0}] Read {1} bytes from response stream", + partNumber, totalRead); + + // Set the length to reflect actual bytes read + downloadedPart.SetLength(totalRead); + + if (totalRead != expectedBytes) + { + _logger.Error(null, "BufferedPartDataHandler: [Part {0}] Size mismatch - Expected {1} bytes, read {2} bytes", + partNumber, expectedBytes, totalRead); + } + } + + return downloadedPart; + } + catch (Exception ex) + { + _logger.Error(ex, "BufferedPartDataHandler: [Part {0}] Failed to buffer part from response stream", partNumber); + // If something goes wrong, StreamPartBuffer.Dispose() will handle cleanup + downloadedPart?.Dispose(); + throw; + } + } + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/ContinueOnFailurePolicy.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/ContinueOnFailurePolicy.cs new file mode 100644 index 000000000000..1d6b0cfe7f00 --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/ContinueOnFailurePolicy.cs @@ -0,0 +1,104 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ + +using System; +using System.Collections.Concurrent; +using System.Threading; +using System.Threading.Tasks; + +namespace Amazon.S3.Transfer.Internal +{ + /// + /// Failure policy that records exceptions and allows other operations to continue. + /// + /// + /// Use this policy when individual operation failures should not abort the overall + /// download directory operation. Exceptions thrown by the action are captured and + /// stored in the supplied ConcurrentBag, and an optional + /// onFailure callback is invoked. For cancellation triggered by + /// the provided CancellationTokenSource, cancellation is propagated + /// to callers by rethrowing the OperationCanceledException. + /// + internal class ContinueOnFailurePolicy : IFailurePolicy + { + private readonly ConcurrentBag<Exception> _errors; + + /// + /// Initializes a new instance of the ContinueOnFailurePolicy class. + /// + /// A ConcurrentBag used to collect exceptions + /// that occur while executing actions under this policy. Failures are added to this bag + /// so the caller can examine aggregated errors after the overall operation completes. + internal ContinueOnFailurePolicy(ConcurrentBag<Exception> errors) + { + _errors = errors; + } + + /// + /// Executes the supplied action and records failures without throwing them, + /// unless the failure is an operation cancellation triggered by the provided + /// CancellationTokenSource. + /// + /// The asynchronous operation to execute under the policy. + /// A callback invoked with the exception when the action fails. + /// A CancellationTokenSource used to determine and signal cancellation. + /// The policy will rethrow cancellations when the cancellation token was requested. + /// + /// A task that completes with true when the action completed successfully. + /// If the action threw a non-cancellation exception, the exception is added to the internal error bag, + /// the onFailure callback is invoked if provided, and the method completes with false to indicate + /// the action failed but the policy handled it and allowed processing to continue. + /// + public async Task<bool> ExecuteAsync(Func<Task> action, Action<Exception> onFailure, CancellationTokenSource cancellationTokenSource) + { + try + { + await action().ConfigureAwait(false); + + return true; + } + // If the operation was canceled via the provided token, propagate cancellation. + catch (OperationCanceledException ex) when (cancellationTokenSource?.IsCancellationRequested == true) + { + onFailure?.Invoke(ex); + + // Collect the exception for later reporting. + _errors.Add(ex); + + throw; + } +// Disabled warning CA1031 to allow catching all exceptions to continue processing. +#pragma warning disable CA1031 + catch (Exception ex) +#pragma warning restore CA1031 + { + onFailure?.Invoke(ex); + + // Collect the exception for later reporting but don't throw it. + // This allows other downloads to continue processing.
+ _errors.Add(ex); + + return false; + } + } + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadCommand.cs index 6359704fc0cd..bca43c615b05 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadCommand.cs @@ -33,7 +33,7 @@ namespace Amazon.S3.Transfer.Internal { - internal partial class DownloadCommand : BaseCommand + internal partial class DownloadCommand : BaseCommand { static int MAX_BACKOFF_IN_MILLISECONDS = (int)TimeSpan.FromSeconds(30).TotalMilliseconds; @@ -62,6 +62,34 @@ static Logger Logger IAmazonS3 _s3Client; TransferUtilityDownloadRequest _request; + long _totalTransferredBytes; + + #region Event Firing Methods + + private void FireTransferInitiatedEvent() + { + var transferInitiatedEventArgs = new DownloadInitiatedEventArgs(_request, _request.FilePath); + _request.OnRaiseTransferInitiatedEvent(transferInitiatedEventArgs); + } + + private void FireTransferCompletedEvent(TransferUtilityDownloadResponse response, string filePath, long transferredBytes, long totalBytes) + { + var transferCompletedEventArgs = new DownloadCompletedEventArgs( + _request, + response, + filePath, + transferredBytes, + totalBytes); + _request.OnRaiseTransferCompletedEvent(transferCompletedEventArgs); + } + + private void FireTransferFailedEvent(string filePath, long transferredBytes, long totalBytes = -1) + { + var eventArgs = new DownloadFailedEventArgs(this._request, filePath, transferredBytes, totalBytes); + this._request.OnRaiseTransferFailedEvent(eventArgs); + } + + #endregion internal DownloadCommand(IAmazonS3 s3Client, TransferUtilityDownloadRequest request) { @@ -89,6 +117,12 @@ private void ValidateRequest() void OnWriteObjectProgressEvent(object sender, WriteObjectProgressArgs e) { + // Keep track of the total transferred bytes so that we can also return this value in case of failure + Interlocked.Add(ref _totalTransferredBytes, e.IncrementTransferred); + + // Set the Request property to enable access to the original download request + e.Request = this._request; + this._request.OnRaiseProgressEvent(e); } @@ -176,4 +210,3 @@ static ByteRange ByteRangeRemainingForDownload(string filepath) } } } - diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadDirectoryCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadDirectoryCommand.cs index 0140554ded39..4c897a5e9f1b 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadDirectoryCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadDirectoryCommand.cs @@ -20,6 +20,7 @@ * */ using System; +using System.Collections.Concurrent; using System.Collections.Generic; using System.IO; using System.Text; @@ -33,25 +34,69 @@ namespace Amazon.S3.Transfer.Internal { - internal partial class DownloadDirectoryCommand : BaseCommand + internal partial class DownloadDirectoryCommand : BaseCommand { + private IFailurePolicy _failurePolicy; + private ConcurrentBag _errors = new ConcurrentBag(); private readonly IAmazonS3 _s3Client; private readonly TransferUtilityDownloadDirectoryRequest _request; private readonly bool _skipEncryptionInstructionFiles; + private readonly bool _useMultipartDownload; int _totalNumberOfFilesToDownload; int _numberOfFilesDownloaded; long _totalBytes; long _transferredBytes; string _currentFile; - internal DownloadDirectoryCommand(IAmazonS3 s3Client, TransferUtilityDownloadDirectoryRequest request) + #region Event Firing 
Methods + + private void FireTransferInitiatedEvent() + { + var transferInitiatedEventArgs = new DownloadDirectoryInitiatedEventArgs(_request); + _request.OnRaiseDownloadDirectoryInitiatedEvent(transferInitiatedEventArgs); + } + + private void FireTransferCompletedEvent(TransferUtilityDownloadDirectoryResponse response) + { + var transferCompletedEventArgs = new DownloadDirectoryCompletedEventArgs( + _request, + response, + Interlocked.Read(ref _transferredBytes), + _totalBytes, + _numberOfFilesDownloaded, + _totalNumberOfFilesToDownload); + _request.OnRaiseDownloadDirectoryCompletedEvent(transferCompletedEventArgs); + } + + private void FireTransferFailedEvent() + { + var eventArgs = new DownloadDirectoryFailedEventArgs( + _request, + Interlocked.Read(ref _transferredBytes), + _totalBytes, + _numberOfFilesDownloaded, + _totalNumberOfFilesToDownload); + _request.OnRaiseDownloadDirectoryFailedEvent(eventArgs); + } + + #endregion + + internal DownloadDirectoryCommand(IAmazonS3 s3Client, TransferUtilityDownloadDirectoryRequest request, TransferUtilityConfig config, bool useMultipartDownload) { if (s3Client == null) - throw new ArgumentNullException("s3Client"); + throw new ArgumentNullException(nameof(s3Client)); + if (request == null) + throw new ArgumentNullException(nameof(request)); this._s3Client = s3Client; this._request = request; + this._config = config; this._skipEncryptionInstructionFiles = s3Client is Amazon.S3.Internal.IAmazonS3Encryption; + _failurePolicy = + request.FailurePolicy == FailurePolicy.AbortOnFailure + ? new AbortOnFailurePolicy() + : new ContinueOnFailurePolicy(_errors); + this._useMultipartDownload = useMultipartDownload; } private void downloadedProgressEventCallback(object sender, WriteObjectProgressArgs e) @@ -107,12 +152,6 @@ internal TransferUtilityDownloadRequest ConstructTransferUtilityDownloadRequest( downloadRequest.IfNoneMatch = this._request.IfNoneMatch; downloadRequest.ResponseHeaderOverrides = this._request.ResponseHeaderOverrides; - //Ensure the target file is a rooted within LocalDirectory. Otherwise error. - if(!InternalSDKUtils.IsFilePathRootedWithDirectoryPath(downloadRequest.FilePath, _request.LocalDirectory)) - { - throw new AmazonClientException($"The file {downloadRequest.FilePath} is not allowed outside of the target directory {_request.LocalDirectory}."); - } - downloadRequest.WriteObjectProgressEvent += downloadedProgressEventCallback; return downloadRequest; diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadManagerConfiguration.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadManagerConfiguration.cs new file mode 100644 index 000000000000..35161aabba90 --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadManagerConfiguration.cs @@ -0,0 +1,64 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ +using System; + +namespace Amazon.S3.Transfer.Internal +{ + /// + /// Base configuration for multipart download coordination. + /// Contains settings common to all download strategies. + /// + internal class DownloadManagerConfiguration + { + /// + /// Maximum concurrent HTTP requests for downloading parts. + /// + public int ConcurrentServiceRequests { get; set; } + + /// + /// Target part size in bytes. + /// + public long TargetPartSizeBytes { get; set; } + + /// + /// Protected default constructor for derived classes. + /// + protected DownloadManagerConfiguration() { } + + /// + /// Creates a DownloadManagerConfiguration with the specified configuration values. + /// + /// Maximum concurrent HTTP requests for downloading parts. + /// Target size for each part in bytes. + /// Thrown when any parameter is less than or equal to 0. + public DownloadManagerConfiguration(int concurrentServiceRequests, long targetPartSizeBytes) + { + if (concurrentServiceRequests <= 0) + throw new ArgumentOutOfRangeException(nameof(concurrentServiceRequests), "Must be greater than 0"); + if (targetPartSizeBytes <= 0) + throw new ArgumentOutOfRangeException(nameof(targetPartSizeBytes), "Must be greater than 0"); + + ConcurrentServiceRequests = concurrentServiceRequests; + TargetPartSizeBytes = targetPartSizeBytes; + } + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/FileDownloadConfiguration.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/FileDownloadConfiguration.cs new file mode 100644 index 000000000000..2193fa860ca6 --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/FileDownloadConfiguration.cs @@ -0,0 +1,67 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ +using System; + +namespace Amazon.S3.Transfer.Internal +{ + /// + /// Configuration settings for file-based multipart downloads. + /// Extends base coordinator settings with file-specific parameters. + /// + internal class FileDownloadConfiguration : DownloadManagerConfiguration + { + /// + /// Buffer size for file I/O operations. + /// + public int BufferSize { get; set; } + + /// + /// Destination file path for the download. + /// + public string DestinationFilePath { get; set; } + + /// + /// Creates a FileDownloadConfiguration with the specified configuration values. + /// + /// Maximum concurrent HTTP requests for downloading parts. + /// Buffer size used for file I/O operations. + /// Target size for each part in bytes. + /// Destination file path for the download. 
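// Editor's note: the configuration hierarchy above in miniature - the base type validates
// the shared settings before the derived constructor body validates its own. A sketch with
// illustrative names (ManagerConfig/FileConfig are not the SDK types):
using System;

class ManagerConfig
{
    public int ConcurrentServiceRequests { get; }
    public long TargetPartSizeBytes { get; }

    public ManagerConfig(int concurrentServiceRequests, long targetPartSizeBytes)
    {
        if (concurrentServiceRequests <= 0)
            throw new ArgumentOutOfRangeException(nameof(concurrentServiceRequests), "Must be greater than 0");
        if (targetPartSizeBytes <= 0)
            throw new ArgumentOutOfRangeException(nameof(targetPartSizeBytes), "Must be greater than 0");
        ConcurrentServiceRequests = concurrentServiceRequests;
        TargetPartSizeBytes = targetPartSizeBytes;
    }
}

sealed class FileConfig : ManagerConfig
{
    public string DestinationFilePath { get; }

    // The base constructor runs first, so shared settings are always validated
    // before the file-specific ones.
    public FileConfig(int concurrentRequests, long partSize, string destinationFilePath)
        : base(concurrentRequests, partSize)
    {
        if (string.IsNullOrWhiteSpace(destinationFilePath))
            throw new ArgumentException("Destination file path cannot be null or empty", nameof(destinationFilePath));
        DestinationFilePath = destinationFilePath;
    }
}

class ConfigDemo
{
    static void Main() => Console.WriteLine(new FileConfig(4, 8 * 1024 * 1024, "out.bin").DestinationFilePath);
}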
+ /// Thrown when any numeric parameter is less than or equal to 0. + /// Thrown when destinationFilePath is null or empty. + public FileDownloadConfiguration( + int concurrentServiceRequests, + int bufferSize, + long targetPartSizeBytes, + string destinationFilePath) + : base(concurrentServiceRequests, targetPartSizeBytes) + { + if (bufferSize <= 0) + throw new ArgumentOutOfRangeException(nameof(bufferSize), "Must be greater than 0"); + if (string.IsNullOrWhiteSpace(destinationFilePath)) + throw new ArgumentException("Destination file path cannot be null or empty", nameof(destinationFilePath)); + + BufferSize = bufferSize; + DestinationFilePath = destinationFilePath; + } + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/FilePartDataHandler.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/FilePartDataHandler.cs new file mode 100644 index 000000000000..d85556a34ecc --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/FilePartDataHandler.cs @@ -0,0 +1,241 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ +using System; +using System.IO; +using System.Threading; +using System.Threading.Tasks; +using Amazon.Runtime.Internal.Util; +using Amazon.S3.Model; +using Amazon.S3.Util; + +namespace Amazon.S3.Transfer.Internal +{ + /// + /// Writes downloaded parts directly to a file at specific offsets. + /// Supports concurrent writes from multiple parts for true parallel download to disk. + /// Uses temporary files with atomic commit for SEP compliance. + /// See for the contract this class implements. + /// + internal class FilePartDataHandler : IPartDataHandler + { + private readonly FileDownloadConfiguration _config; + private readonly AtomicFileHandler _fileHandler; + private string _tempFilePath; + private bool _disposed = false; + + private readonly Logger _logger = Logger.GetLogger(typeof(FilePartDataHandler)); + + /// + /// Initializes a new instance for file downloads. + /// Writes parts directly to disk without memory buffering. + /// + public FilePartDataHandler(FileDownloadConfiguration config) + { + _config = config ?? throw new ArgumentNullException(nameof(config)); + _fileHandler = new AtomicFileHandler(); + } + + /// + public Task PrepareAsync(DownloadResult discoveryResult, CancellationToken cancellationToken) + { + // Create temporary file once during preparation phase + _tempFilePath = _fileHandler.CreateTemporaryFile(_config.DestinationFilePath); + + _logger.DebugFormat("FilePartDataHandler: Created temporary file for download"); + + return Task.CompletedTask; + } + + /// + /// + /// Response Ownership: + /// + /// This method takes ownership of the response and is responsible for disposing it in ALL cases, + /// including error scenarios. 
Callers must NOT dispose the response after calling this method. + /// + /// + public async Task ProcessPartAsync( + int partNumber, + GetObjectResponse response, + CancellationToken cancellationToken) + { + try + { + _logger.DebugFormat("FilePartDataHandler: [Part {0}] Starting to process part - ContentLength={1}", + partNumber, response.ContentLength); + + // Calculate offset for this part based on ContentRange or part number + long offset = GetPartOffset(response, partNumber); + + _logger.DebugFormat("FilePartDataHandler: [Part {0}] Calculated file offset={1}", + partNumber, offset); + + // Write part data to file at the calculated offset + await WritePartToFileAsync(partNumber, offset, response, cancellationToken) + .ConfigureAwait(false); + + _logger.DebugFormat("FilePartDataHandler: [Part {0}] File write completed successfully", + partNumber); + } + finally + { + // Always dispose response after writing to disk (success or failure) + // This releases the HTTP connection back to the pool + response?.Dispose(); + } + } + + /// + public Task WaitForCapacityAsync(CancellationToken cancellationToken) + { + // No-op: FilePartDataHandler writes directly to disk without buffering parts in memory. + // Memory throttling is only needed for BufferedPartDataHandler which keeps parts in memory. + return Task.CompletedTask; + } + + /// + public void ReleaseCapacity() + { + // No-op: FilePartDataHandler writes directly to disk without buffering parts in memory. + // Memory throttling is only needed for BufferedPartDataHandler which keeps parts in memory. + } + + /// + public void OnDownloadComplete(Exception exception) + { + if (exception == null) + { + // Success - commit temp file to final destination + _logger.DebugFormat("FilePartDataHandler: Download complete, committing temporary file to destination"); + + try + { + _fileHandler.CommitFile(_tempFilePath, _config.DestinationFilePath); + + _logger.DebugFormat("FilePartDataHandler: Successfully committed file to destination"); + } + catch (Exception commitException) + { + _logger.Error(commitException, "FilePartDataHandler: Failed to commit file to destination"); + + // Cleanup on commit failure + _fileHandler.CleanupOnFailure(); + throw new InvalidOperationException( + "Failed to commit downloaded file to final destination", commitException); + } + } + else + { + // Failure - cleanup temp file + _logger.Error(exception, "FilePartDataHandler: Download failed, cleaning up temporary file"); + + _fileHandler.CleanupOnFailure(); + } + } + + /// + public void Dispose() + { + if (!_disposed) + { + _fileHandler?.Dispose(); + _disposed = true; + } + } + + /// + /// Gets the file offset for writing a part based on the header. + /// + private long GetPartOffset(GetObjectResponse response, int partNumber) + { + // Parse offset from ContentRange header (works for both PART and RANGE strategies) + if (!string.IsNullOrEmpty(response.ContentRange)) + { + // Use centralized ContentRange parsing utility + return ContentRangeParser.GetStartByte(response.ContentRange); + } + + // For single-part downloads (especially empty objects), ContentRange may not be present + // S3 doesn't include ContentRange for simple GET requests without range headers + // In this case, the offset is always 0 since we're writing the entire response + if (partNumber == 1) + { + return 0; + } + + // ContentRange should be present for actual multipart downloads (part > 1) + throw new InvalidOperationException( + $"ContentRange header missing from part {partNumber} response. 
" + + $"Unable to determine file write offset."); + } + + /// + /// Writes part data from GetObjectResponse ResponseStream to the file at the specified offset. + /// + private async Task WritePartToFileAsync( + int partNumber, + long offset, + GetObjectResponse response, + CancellationToken cancellationToken) + { + if (string.IsNullOrEmpty(_tempFilePath)) + throw new InvalidOperationException("Temporary file has not been created"); + + _logger.DebugFormat("FilePartDataHandler: Opening file for writing at offset {0} with BufferSize={1}", + offset, _config.BufferSize); + + // Open file with FileShare.Write to allow concurrent writes from other threads + using (var fileStream = new FileStream( + _tempFilePath, + FileMode.Open, // Open existing file + FileAccess.Write, + FileShare.Write, // Allow concurrent writes to different offsets + _config.BufferSize)) + { + // Seek to the correct offset for this part + fileStream.Seek(offset, SeekOrigin.Begin); + + _logger.DebugFormat("FilePartDataHandler: [Part {0}] Writing {1} bytes to file at offset {2}", partNumber, + response.ContentLength, offset); + + // Use GetObjectResponse's stream copy logic which includes: + // - Progress tracking with events + // - Size validation + // - Buffered reading + await response.WriteResponseStreamAsync( + fileStream, + null, + _config.BufferSize, + cancellationToken, + validateSize: true) + .ConfigureAwait(false); + + // Ensure data is written to disk + await fileStream.FlushAsync(cancellationToken) + .ConfigureAwait(false); + + _logger.DebugFormat("FilePartDataHandler: [Part {0}] Successfully wrote {1} bytes at offset {2}", partNumber, + response.ContentLength, offset); + } + } + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/IDownloadManager.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/IDownloadManager.cs new file mode 100644 index 000000000000..3c54bfcd8b00 --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/IDownloadManager.cs @@ -0,0 +1,86 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ +using System; +using System.Threading; +using System.Threading.Tasks; +using Amazon.S3.Model; + +namespace Amazon.S3.Transfer.Internal +{ + /// + /// Coordinates multipart downloads including discovery, concurrent downloads, and progress reporting. + /// + internal interface IDownloadManager : IDisposable + { + /// + /// Discovers the download strategy and starts concurrent downloads in a single operation. + /// This unified method eliminates resource leakage by managing HTTP slots and buffer capacity + /// internally throughout the entire download lifecycle. + /// + /// Optional callback for progress tracking events. + /// A token to cancel the download operation. 
+ /// + /// A task containing download results including total parts, object size, + /// and initial response data. + /// + /// + /// This method performs both discovery and download operations atomically: + /// 1. Acquires HTTP slot and buffer capacity + /// 2. Makes initial GetObject request to discover download strategy + /// 3. Processes Part 1 immediately + /// 4. Starts background downloads for remaining parts (if multipart) + /// 5. Returns after Part 1 is processed, allowing consumer to begin reading + /// + /// Resources (HTTP slots, buffer capacity) are managed internally and released + /// at the appropriate times, eliminating the awkward resource holding that existed + /// with the previous two-method API. + /// + Task StartDownloadAsync(EventHandler progressCallback, CancellationToken cancellationToken); + } + + /// + /// Download results with metadata about the completed discovery and initial download. + /// + internal class DownloadResult + { + /// + /// Total parts needed (1 = single-part, >1 = multipart). + /// + public int TotalParts { get; set; } + + /// + /// Total size of the object in bytes. + /// + public long ObjectSize { get; set; } + + /// + /// GetObjectResponse obtained during download initialization, containing the ResponseStream. + /// Represents the complete object for single-part downloads or the first range/part for multipart downloads. + /// + public GetObjectResponse InitialResponse { get; set; } + + /// + /// Whether this is a single-part download. + /// + public bool IsSinglePart => TotalParts == 1; + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/IFailurePolicy.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/IFailurePolicy.cs new file mode 100644 index 000000000000..a32a6ef538b2 --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/IFailurePolicy.cs @@ -0,0 +1,62 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ + +using System; +using System.Threading; +using System.Threading.Tasks; + +namespace Amazon.S3.Transfer.Internal +{ + /// + /// Defines a policy for handling failures when executing asynchronous operations. + /// Implementations encapsulate cancellation behavior for + /// operations that may fail and need controlled continuation or termination. + /// + internal interface IFailurePolicy + { + /// + /// Executes an asynchronous under this failure policy. + /// + /// + /// Implementations of this interface control how failures that occur while running + /// are handled (for example, whether to abort the overall + /// operation, continue on failure, or aggregate errors). 
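// Editor's note: the failure-policy contract in miniature - a continue-on-failure executor
// that collects non-cancellation errors in a ConcurrentBag and reports success/failure to
// the caller. A sketch of the semantics only, not the SDK implementation.
using System;
using System.Collections.Concurrent;
using System.IO;
using System.Threading;
using System.Threading.Tasks;

class ContinuePolicyDemo
{
    // Returns true when the action succeeded; collects non-cancellation failures
    // and returns false so sibling operations keep running.
    static async Task<bool> ExecuteAsync(
        Func<Task> action, ConcurrentBag<Exception> errors, CancellationTokenSource cts)
    {
        try { await action().ConfigureAwait(false); return true; }
        catch (OperationCanceledException) when (cts.IsCancellationRequested) { throw; }
        catch (Exception ex) { errors.Add(ex); return false; }
    }

    static async Task Main()
    {
        var errors = new ConcurrentBag<Exception>();
        using (var cts = new CancellationTokenSource())
        {
            bool ok1 = await ExecuteAsync(() => Task.CompletedTask, errors, cts);
            bool ok2 = await ExecuteAsync(() => throw new IOException("disk full"), errors, cts);
            Console.WriteLine($"{ok1} {ok2} errors={errors.Count}"); // True False errors=1
        }
    }
}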
When the action throws or faults, the policy implementation is responsible for invoking + /// the onFailure callback with the thrown exception and for + /// taking any policy-specific cancellation action (for example by calling + /// Cancel() on the supplied CancellationTokenSource). + /// + /// The returned task completes with true when the + /// action completed successfully according to the policy and + /// the caller may proceed. It completes with false when the action failed and + /// the policy handled the failure (the caller should treat this as a failed step). + /// + /// A function that performs the asynchronous work to execute under the policy. + /// A callback that will be invoked with the exception when the action fails. + /// A CancellationTokenSource the policy may cancel to signal termination of related work. + /// + /// A task that resolves to true if the action completed successfully + /// (no failure), or false if the action failed but the policy handled the failure. + /// + Task<bool> ExecuteAsync(Func<Task> action, Action<Exception> onFailure, CancellationTokenSource cancellationTokenSource); + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/IPartBufferManager.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/IPartBufferManager.cs new file mode 100644 index 000000000000..5f5c214421b5 --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/IPartBufferManager.cs @@ -0,0 +1,90 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ +using System; +using System.IO; +using System.Threading; +using System.Threading.Tasks; + +namespace Amazon.S3.Transfer.Internal +{ + /// + /// Manages part buffers with ArrayPool lifecycle and concurrency control. + /// + internal interface IPartBufferManager : IDisposable + { + /// + /// Waits for available buffer space before downloading a new part. + /// + /// A token to cancel the wait operation. + /// A task that completes when buffer space becomes available. + Task WaitForBufferSpaceAsync(CancellationToken cancellationToken); + + /// + /// Adds a part data source and signals readers when the next expected part arrives. + /// + /// The part data source to add. + void AddDataSource(IPartDataSource dataSource); + + /// + /// Adds a downloaded part buffer and signals readers when the next expected part arrives. + /// + /// The downloaded part buffer to add. + void AddBuffer(StreamPartBuffer buffer); + + /// + /// Adds a part data source (streaming or buffered) and signals waiting readers once the data source has been registered. + /// + /// The part data source to add (can be StreamingDataSource or BufferedDataSource). + void AddBuffer(IPartDataSource dataSource); + + /// + /// Reads data from the buffer manager.
Automatically handles sequential part consumption + /// and reads across part boundaries to fill the buffer when possible, matching standard Stream.Read() behavior. + /// + /// The buffer to read data into. + /// The offset in the buffer. + /// The maximum number of bytes to read. + /// A token to cancel the operation. + /// + /// A task whose result contains the number of bytes read. + /// Returns 0 bytes read when end of stream is reached. + /// + Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken); + + /// + /// Releases buffer space to allow new parts to be downloaded. + /// + void ReleaseBufferSpace(); + + /// + /// Next expected part number in the sequence. + /// + int NextExpectedPartNumber { get; } + + /// + /// Marks download completion and handles end-of-stream. + /// + /// Any exception that occurred during downloads, or null if successful. + void MarkDownloadComplete(Exception exception); + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/IPartDataHandler.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/IPartDataHandler.cs new file mode 100644 index 000000000000..43cdce2075f6 --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/IPartDataHandler.cs @@ -0,0 +1,79 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ +using System; +using System.Threading; +using System.Threading.Tasks; +using Amazon.S3.Model; + +namespace Amazon.S3.Transfer.Internal +{ + /// + /// Strategy interface for processing downloaded part data. + /// Implementations determine WHERE and HOW downloaded parts are stored. + /// Enables separation of download orchestration from data handling (buffering, file writing, etc). + /// + internal interface IPartDataHandler : IDisposable + { + /// + /// Prepare the handler for processing parts based on discovery result. + /// Called once before any parts are processed to perform initialization. + /// + /// Discovery result containing object metadata + /// Cancellation token + /// Task that completes when preparation is done + Task PrepareAsync(DownloadResult discoveryResult, CancellationToken cancellationToken); + + /// + /// Process a downloaded part from the GetObjectResponse. + /// Implementation decides whether to buffer in memory, write to file, etc. + /// + /// 1-based part number + /// GetObjectResponse with ResponseStream to process + /// Cancellation token + /// Task that completes when part processing is done + Task ProcessPartAsync( + int partNumber, + GetObjectResponse response, + CancellationToken cancellationToken); + + /// + /// Wait for available capacity to process another part. + /// Allows implementations to implement backpressure (memory limits, concurrency, etc). 
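// Editor's note: the WaitForCapacityAsync/ReleaseCapacity pairing described above is a
// classic SemaphoreSlim backpressure gate. A minimal sketch, assuming nothing beyond the
// BCL - producers await a slot before starting a part, consumers release it afterwards.
using System;
using System.Threading;
using System.Threading.Tasks;

class BackpressureGate
{
    private readonly SemaphoreSlim _capacity;

    public BackpressureGate(int maxInFlight) => _capacity = new SemaphoreSlim(maxInFlight, maxInFlight);

    // Producer side: wait (asynchronously) until a slot frees up.
    public Task WaitForCapacityAsync(CancellationToken ct) => _capacity.WaitAsync(ct);

    // Consumer side: release the slot once the part has been consumed.
    public void ReleaseCapacity() => _capacity.Release();

    static async Task Main()
    {
        var gate = new BackpressureGate(maxInFlight: 2); // at most 2 parts in flight
        for (int part = 1; part <= 5; part++)
        {
            await gate.WaitForCapacityAsync(CancellationToken.None);
            int p = part;
            _ = Task.Run(async () =>
            {
                await Task.Delay(10);        // simulate processing the part
                Console.WriteLine($"part {p} done");
                gate.ReleaseCapacity();      // frees a slot for the next part
            });
        }
        await Task.Delay(200);
    }
}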
+ /// + /// Cancellation token + /// Task that completes when capacity becomes available + Task WaitForCapacityAsync(CancellationToken cancellationToken); + + /// + /// Release capacity after a part is processed/consumed. + /// Pairs with WaitForCapacityAsync for backpressure management. + /// + void ReleaseCapacity(); + + /// + /// Called when all downloads complete (successfully or with error). + /// Allows implementations to perform cleanup or commit operations. + /// + /// Exception if download failed, null if successful + void OnDownloadComplete(Exception exception); + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/IPartDataSource.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/IPartDataSource.cs new file mode 100644 index 000000000000..b1ace5ebcbd9 --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/IPartDataSource.cs @@ -0,0 +1,53 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ +using System; +using System.Threading; +using System.Threading.Tasks; + +namespace Amazon.S3.Transfer.Internal +{ + /// + /// Interface for buffered part data sources. + /// + internal interface IPartDataSource : IDisposable + { + /// + /// Reads data from the ArrayPool buffer into the destination buffer. + /// + /// Destination buffer + /// Offset in destination buffer + /// Maximum bytes to read + /// Cancellation token + /// Number of bytes actually read + Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken); + + /// + /// Whether this data source has been fully consumed. + /// + bool IsComplete { get; } + + /// + /// Part number this data source represents. + /// + int PartNumber { get; } + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadCommand.cs new file mode 100644 index 000000000000..e282decfbc18 --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadCommand.cs @@ -0,0 +1,211 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ +using System; +using System.Threading; +using Amazon.Runtime.Internal.Util; +using Amazon.S3.Model; +using Amazon.S3.Util; + +namespace Amazon.S3.Transfer.Internal +{ + /// + /// Command for downloading files using multipart download strategy. + /// Orchestrates the and + /// to perform concurrent part downloads with SEP compliance. + /// + internal partial class MultipartDownloadCommand : BaseCommand + { + private readonly IAmazonS3 _s3Client; + private readonly TransferUtilityDownloadRequest _request; + private readonly TransferUtilityConfig _config; + private readonly SemaphoreSlim _sharedHttpThrottler; + + // Track last known transferred bytes from coordinator's progress events + private long _lastKnownTransferredBytes; + + private readonly Logger _logger = Logger.GetLogger(typeof(MultipartDownloadCommand)); + + /// + /// Initializes a new instance of the MultipartDownloadCommand class for single file downloads. + /// + /// The S3 client to use for downloads. + /// The download request containing configuration. + /// The TransferUtility configuration. + internal MultipartDownloadCommand(IAmazonS3 s3Client, TransferUtilityDownloadRequest request, TransferUtilityConfig config) + : this(s3Client, request, config, null) + { + } + + /// + /// Initializes a new instance of the MultipartDownloadCommand class for directory downloads. + /// + /// The S3 client to use for downloads. + /// The download request containing configuration. + /// The TransferUtility configuration. + /// Shared HTTP concurrency throttler for directory operations, or null for single file downloads. + internal MultipartDownloadCommand(IAmazonS3 s3Client, TransferUtilityDownloadRequest request, TransferUtilityConfig config, SemaphoreSlim sharedHttpThrottler) + { + _s3Client = s3Client ?? throw new ArgumentNullException(nameof(s3Client)); + _request = request ?? throw new ArgumentNullException(nameof(request)); + _config = config ?? throw new ArgumentNullException(nameof(config)); + _sharedHttpThrottler = sharedHttpThrottler; // Can be null for single file downloads + } + + /// + /// Validates the download request to ensure all required parameters are set. + /// + /// Thrown when required parameters are missing. + private void ValidateRequest() + { + if (!_request.IsSetBucketName()) + { + throw new InvalidOperationException("The BucketName specified is null or empty!"); + } + + if (!_request.IsSetKey()) + { + throw new InvalidOperationException("The Key specified is null or empty!"); + } + +#if BCL + if (!_request.IsSetFilePath()) + { + throw new InvalidOperationException("The FilePath specified is null or empty!"); + } +#endif + } + + /// + /// Creates a FileDownloadConfiguration from the request and S3 client configuration. + /// + /// A configured FileDownloadConfiguration instance. + private FileDownloadConfiguration CreateConfiguration() + { + // Use concurrent service requests from config + int concurrentRequests = _config.ConcurrentServiceRequests; + + // Determine target part size + // Use request setting if available, otherwise use 8MB default (matching BufferedMultipartStream) + long targetPartSize = _request.IsSetPartSize() + ? 
_request.PartSize + : S3Constants.DefaultPartSize; // 8MB default + + // Use S3 client buffer size for I/O operations + int bufferSize = _s3Client.Config.BufferSize; + + _logger.DebugFormat("MultipartDownloadCommand: Creating configuration - PartSizeFromRequest={0}, UsingDefaultPartSize={1}", + _request.IsSetPartSize() ? _request.PartSize.ToString() : "Not Set", + !_request.IsSetPartSize()); + + return new FileDownloadConfiguration( + concurrentRequests, + bufferSize, + targetPartSize, + _request.FilePath + ); + } + + #region Event Firing Methods + + /// + /// Fires the DownloadInitiatedEvent to notify subscribers that the download has started. + /// This event is fired exactly once at the beginning of the download operation. + /// + private void FireTransferInitiatedEvent() + { + var transferInitiatedEventArgs = new DownloadInitiatedEventArgs(_request, _request.FilePath); + _request.OnRaiseTransferInitiatedEvent(transferInitiatedEventArgs); + } + + /// + /// Fires the DownloadCompletedEvent to notify subscribers that the download completed successfully. + /// This event is fired exactly once when all parts have been downloaded and assembled. + /// Downloads are complete, so transferred bytes equals total bytes. + /// + /// The unified TransferUtilityDownloadResponse containing S3 metadata + /// The total number of bytes in the file + private void FireTransferCompletedEvent(TransferUtilityDownloadResponse response, long totalBytes) + { + var transferCompletedEventArgs = new DownloadCompletedEventArgs( + _request, + response, + _request.FilePath, + totalBytes, + totalBytes); + _request.OnRaiseTransferCompletedEvent(transferCompletedEventArgs); + } + + /// + /// Fires the DownloadFailedEvent to notify subscribers that the download failed. + /// This event is fired exactly once when an error occurs during the download. + /// Uses the last known transferred bytes from progress tracking. + /// + /// Total file size if known, otherwise -1 + private void FireTransferFailedEvent(long totalBytes = -1) + { + var eventArgs = new DownloadFailedEventArgs( + _request, + _request.FilePath, + System.Threading.Interlocked.Read(ref _lastKnownTransferredBytes), + totalBytes); + _request.OnRaiseTransferFailedEvent(eventArgs); + } + + #endregion + + #region Progress Tracking + + /// + /// Callback for part download progress. + /// Forwards the aggregated progress events from the coordinator to the user's progress callback. + /// The coordinator has already aggregated progress across all concurrent part downloads. + /// Tracks the last known transferred bytes for failure reporting. + /// + /// The event sender (coordinator) + /// Aggregated progress information from the coordinator + internal void DownloadPartProgressEventCallback(object sender, WriteObjectProgressArgs e) + { + // Track last known transferred bytes using Exchange (not Add). + // + // Why Exchange? The coordinator already aggregates increments from concurrent parts: + // Coordinator receives: Part 1: +512 bytes, Part 2: +1024 bytes, Part 3: +768 bytes + // Coordinator aggregates: 0 -> 512 -> 1536 -> 2304 (using Interlocked.Add) + // Coordinator passes to us: e.TransferredBytes = 2304 (pre-aggregated total) + // + // We receive the TOTAL (e.TransferredBytes = 2304), not an increment (+768). + // Using Add here would incorrectly accumulate totals: 0 + 2304 + 2304 + ... = wrong! + // Using Exchange correctly stores the latest total: 2304 (overwrite previous value). 
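+ // The stored total is also what FireTransferFailedEvent later reports as transferredBytes if the download aborts mid-transfer.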
+ // + // Compare to other commands (SimpleUploadCommand, DownloadCommand) which receive + // INCREMENTS directly from SDK streams and must use Add to accumulate them. + System.Threading.Interlocked.Exchange(ref _lastKnownTransferredBytes, e.TransferredBytes); + + // Set the Request property to enable access to the original download request + e.Request = _request; + + // Forward the coordinator's aggregated progress event to the user + _request.OnRaiseProgressEvent(e); + } + + #endregion + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs new file mode 100644 index 000000000000..a961fa0141f8 --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartDownloadManager.cs @@ -0,0 +1,994 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ +using System; +using System.Buffers; +using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; +using System.IO; +using System.Threading; +using System.Threading.Tasks; +using Amazon.Runtime; +using Amazon.Runtime.Internal.Util; +using Amazon.S3.Model; +using Amazon.S3.Util; + +namespace Amazon.S3.Transfer.Internal +{ + /// + /// Coordinates multipart downloads using PART or RANGE strategies per SEP spec. + /// Handles discovery and concurrent part downloads. + /// + internal class MultipartDownloadManager : IDownloadManager + { + private readonly IAmazonS3 _s3Client; + private readonly BaseDownloadRequest _request; + private readonly DownloadManagerConfiguration _config; + private readonly IPartDataHandler _dataHandler; + private readonly SemaphoreSlim _httpConcurrencySlots; + private readonly bool _ownsHttpThrottler; + private readonly RequestEventHandler _requestEventHandler; + private bool _disposed = false; + private bool _discoveryCompleted = false; + private Task _downloadCompletionTask; + + private string _savedETag; + private int _discoveredPartCount; + + // Progress tracking fields for multipart download aggregation + private long _totalTransferredBytes = 0; + private long _totalObjectSize = 0; + private EventHandler<WriteObjectProgressArgs> _userProgressCallback; + + // Atomic flag to ensure completion event fires exactly once + // Without this, concurrent parts completing simultaneously can both see + // transferredBytes == _totalObjectSize and fire duplicate completion events + // Uses long instead of bool for compatibility with Interlocked operations + private long _completionEventFired = 0; // 0 = false, 1 = true + + private readonly Logger _logger = Logger.GetLogger(typeof(MultipartDownloadManager)); + + /// + /// Task that completes when all downloads finish (successfully or with error).
+ /// For file-based downloads, await this before returning to ensure file is committed. + /// For stream-based downloads, this can be ignored as the consumer naturally waits. + /// Returns a completed task if downloads haven't started or completed synchronously. + /// + public Task DownloadCompletionTask => _downloadCompletionTask ?? Task.CompletedTask; + + /// + /// Initializes a new instance of the MultipartDownloadManager for single file downloads. + /// This constructor creates and owns its own HTTP concurrency throttler based on the configuration. + /// + /// The IAmazonS3 client used to make GetObject requests to S3. + /// The BaseDownloadRequest containing bucket, key, version, and download strategy configuration. + /// The DownloadManagerConfiguration specifying concurrency limits and part size settings. + /// The IPartDataHandler responsible for buffering and processing downloaded part data. + /// Optional request event handler for adding custom headers or tracking requests. May be null. + /// + /// Thrown when s3Client, request, config, or dataHandler is null. + /// + /// + /// This constructor is used for single file downloads where each download manages its own HTTP concurrency. + /// The created throttler will be disposed when this instance is disposed. + /// For directory downloads with shared concurrency management, use the overload that accepts a shared throttler. + /// + /// + /// + /// + /// Thrown when using S3 encryption client, which does not support multipart downloads. + public MultipartDownloadManager(IAmazonS3 s3Client, BaseDownloadRequest request, DownloadManagerConfiguration config, IPartDataHandler dataHandler, RequestEventHandler requestEventHandler = null) + : this(s3Client, request, config, dataHandler, requestEventHandler, null) + { + } + + /// + /// Initializes a new instance of the MultipartDownloadManager for directory downloads or scenarios requiring shared concurrency control. + /// This constructor allows using a shared HTTP concurrency throttler across multiple concurrent file downloads. + /// + /// The IAmazonS3 client used to make GetObject requests to S3. + /// The BaseDownloadRequest containing bucket, key, version, and download strategy configuration. + /// The DownloadManagerConfiguration specifying concurrency limits and part size settings. + /// The IPartDataHandler responsible for buffering and processing downloaded part data. + /// Optional request event handler for adding custom headers or tracking requests. May be null. + /// + /// Optional shared SemaphoreSlim for coordinating HTTP concurrency across multiple downloads. + /// If null, a new throttler will be created and owned by this instance. + /// If provided, the caller retains ownership and responsibility for disposal. + /// + /// + /// Thrown when s3Client, request, config, or dataHandler is null. + /// + /// + /// + /// This constructor is typically used by directory download operations where multiple files are being downloaded + /// concurrently and need to share a global HTTP concurrency limit. + /// + /// + /// Resource Ownership: + /// If sharedHttpThrottler is provided, this instance does NOT take ownership and will NOT dispose it. + /// If sharedHttpThrottler is null, this instance creates and owns the throttler and will dispose it. + /// + /// + /// + /// + /// + /// + /// Thrown when using S3 encryption client, which does not support multipart downloads. + public MultipartDownloadManager(IAmazonS3 s3Client, BaseDownloadRequest request, DownloadManagerConfiguration config, IPartDataHandler dataHandler, RequestEventHandler requestEventHandler, SemaphoreSlim sharedHttpThrottler) + { + _s3Client = s3Client ?? throw new ArgumentNullException(nameof(s3Client)); + _request = request ?? throw new ArgumentNullException(nameof(request)); + _config = config ?? 
throw new ArgumentNullException(nameof(config)); + _dataHandler = dataHandler ?? throw new ArgumentNullException(nameof(dataHandler)); + + // Validate that S3 encryption client is not being used for multipart downloads + if (_s3Client is Amazon.S3.Internal.IAmazonS3Encryption) + { + throw new NotSupportedException("Multipart download is not supported when using Amazon.S3.Internal.IAmazonS3Encryption client. Please use the Amazon.S3.AmazonS3Client for multipart download."); + } + + _requestEventHandler = requestEventHandler; + + // Use shared throttler if provided, otherwise create our own + if (sharedHttpThrottler != null) + { + _httpConcurrencySlots = sharedHttpThrottler; + _ownsHttpThrottler = false; // Don't dispose - directory command owns it + } + else + { + _httpConcurrencySlots = new SemaphoreSlim( + _config.ConcurrentServiceRequests, // initialCount + _config.ConcurrentServiceRequests // maxCount - prevents exceeding configured limit + ); + _ownsHttpThrottler = true; // We own it, so we dispose it + } + } + + /// + /// Discovers the download strategy and starts concurrent downloads in a single unified operation. + /// This eliminates resource leakage by managing HTTP slots and buffer capacity internally. + /// + /// Optional callback for progress tracking events. + /// A token to cancel the download operation. + /// + /// A DownloadResult containing information about the object size, part count, + /// and the initial GetObject response. + /// + /// + /// This method performs both discovery and download operations atomically: + /// 1. Acquires HTTP slot and buffer capacity + /// 2. Makes initial GetObject request to discover download strategy + /// 3. Processes Part 1 immediately + /// 4. Starts background downloads for remaining parts (if multipart) + /// 5. Returns after Part 1 is processed, allowing consumer to begin reading + /// + /// Resources (HTTP slots, buffer capacity) are managed internally and released + /// at the appropriate times. + /// + /// Thrown if the manager has been disposed. + /// Thrown if download has already been started. + /// Thrown if the operation is cancelled. + /// + public async Task<DownloadResult> StartDownloadAsync(EventHandler<WriteObjectProgressArgs> progressCallback, CancellationToken cancellationToken) + { + ThrowIfDisposed(); + + if (_discoveryCompleted) + throw new InvalidOperationException("Download has already been started"); + + // Step 1: Perform discovery (acquires resources, downloads Part 1) + var discoveryResult = await PerformDiscoveryAsync(cancellationToken).ConfigureAwait(false); + + // Step 2: Process Part 1 and start remaining downloads + await PerformDownloadsAsync(discoveryResult, progressCallback, cancellationToken).ConfigureAwait(false); + + // Step 3: Return results to caller + return discoveryResult; + } + + /// + /// Performs the discovery phase by making an initial GetObject request. + /// + /// Cancellation token to cancel the discovery operation. + /// + /// A DownloadResult containing information about the object size, part count, + /// and the initial GetObject response. + /// + /// + /// This method acquires an HTTP concurrency slot and buffer capacity, then makes the initial + /// GetObject request to determine the download strategy. The HTTP slot is held until + /// PerformDownloadsAsync processes Part 1.
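+ /// For example, with ConcurrentServiceRequests = 10, discovery holds one of the ten HTTP slots until Part 1 has been fully processed, leaving nine slots available for Parts 2+ in the meantime.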
+ /// + private async Task<DownloadResult> PerformDiscoveryAsync(CancellationToken cancellationToken) + { + ThrowIfDisposed(); + + if (_discoveryCompleted) + throw new InvalidOperationException("Discovery has already been performed"); + + _logger.DebugFormat("MultipartDownloadManager: Starting discovery with strategy={0}", + _request.MultipartDownloadType); + + try + { + // Use strategy-specific discovery based on MultipartDownloadType + var result = _request.MultipartDownloadType == MultipartDownloadType.PART + ? await DiscoverUsingPartStrategyAsync(cancellationToken).ConfigureAwait(false) + : await DiscoverUsingRangeStrategyAsync(cancellationToken).ConfigureAwait(false); + + _discoveryCompleted = true; + + _logger.InfoFormat("MultipartDownloadManager: Discovery complete - ObjectSize={0}, TotalParts={1}, Strategy={2}, ETagPresent={3}", + result.ObjectSize, + result.TotalParts, + _request.MultipartDownloadType, + !string.IsNullOrEmpty(_savedETag)); + + return result; + } + catch (Exception ex) + { + _logger.Error(ex, "MultipartDownloadManager: Discovery failed"); + throw; + } + } + + /// + /// Processes Part 1 and starts downloading remaining parts for multipart downloads. + /// Returns immediately after processing Part 1 to allow the consumer to begin reading. + /// + /// + /// The download result from discovery containing object metadata and the initial GetObject response. + /// + /// + /// Optional progress callback that will be invoked as parts are downloaded. For multipart downloads, + /// progress is aggregated across all concurrent part downloads. + /// + /// Cancellation token to cancel the download operation. + /// + /// A task that completes after Part 1 is processed. For multipart downloads, remaining parts + /// continue downloading in the background (monitor via DownloadCompletionTask). + /// + /// + /// This is a private method called by StartDownloadAsync after discovery completes. + /// It processes Part 1 and starts background downloads for remaining parts. + /// + private async Task PerformDownloadsAsync(DownloadResult downloadResult, EventHandler<WriteObjectProgressArgs> progressCallback, CancellationToken cancellationToken) + { + ThrowIfDisposed(); + + if (downloadResult == null) + throw new ArgumentNullException(nameof(downloadResult)); + + // Store for progress aggregation + _userProgressCallback = progressCallback; + _totalObjectSize = downloadResult.ObjectSize; + + _logger.DebugFormat("MultipartDownloadManager: Starting downloads - TotalParts={0}, IsSinglePart={1}", + downloadResult.TotalParts, downloadResult.IsSinglePart); + + var internalCts = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken); + + try + { + // Create delegate once and reuse for all parts + var wrappedCallback = progressCallback != null + ? 
new EventHandler<WriteObjectProgressArgs>(DownloadPartProgressEventCallback) + : null; + + // Process Part 1 (downloaded during discovery) + await ProcessFirstPartAsync(downloadResult, wrappedCallback, cancellationToken).ConfigureAwait(false); + + if (downloadResult.IsSinglePart) + { + // Single-part: Part 1 is the entire object + _logger.DebugFormat("MultipartDownloadManager: Single-part download complete"); + _dataHandler.OnDownloadComplete(null); + return; + } + + // Check if already cancelled before creating background task + cancellationToken.ThrowIfCancellationRequested(); + + // Start background task to handle remaining parts + // This allows the method to return immediately so the consumer can start reading + // which prevents deadlock when MaxInMemoryParts is reached before consumer begins reading + _downloadCompletionTask = Task.Run(async () => + { + await StartBackgroundDownloadsAsync(downloadResult, wrappedCallback, internalCts).ConfigureAwait(false); + }, cancellationToken); + + // Return immediately to allow consumer to start reading + // This prevents deadlock when buffer fills up before consumer begins reading + _logger.DebugFormat("MultipartDownloadManager: Returning to allow consumer to start reading"); + } + catch (Exception ex) + { + _logger.Error(ex, "MultipartDownloadManager: Download failed"); + + HandleDownloadError(ex, internalCts); + + // Dispose the CancellationTokenSource if background task was never started + // This handles the case where an error occurs before Task.Run is called + internalCts.Dispose(); + + throw; + } + } + + + + /// + /// Processes Part 1 (downloaded during discovery) including preparation, progress tracking, and semaphore release. + /// + private async Task ProcessFirstPartAsync(DownloadResult downloadResult, EventHandler<WriteObjectProgressArgs> wrappedCallback, CancellationToken cancellationToken) + { + try + { + // Prepare the data handler (e.g., create temp files for file-based downloads) + await _dataHandler.PrepareAsync(downloadResult, cancellationToken).ConfigureAwait(false); + + // Attach progress callback to Part 1's response if provided + if (wrappedCallback != null) + { + downloadResult.InitialResponse.WriteObjectProgressEvent += wrappedCallback; + } + + // Process Part 1 from InitialResponse (applies to both single-part and multipart) + // NOTE: Semaphore is still held from discovery phase and will be released in finally block + _logger.DebugFormat("MultipartDownloadManager: Processing Part 1 from discovery response"); + await _dataHandler.ProcessPartAsync(1, downloadResult.InitialResponse, cancellationToken).ConfigureAwait(false); + + _logger.DebugFormat("MultipartDownloadManager: Part 1 processing completed"); + } + finally + { + // Always detach the event handler to prevent memory leak + if (wrappedCallback != null) + { + downloadResult.InitialResponse.WriteObjectProgressEvent -= wrappedCallback; + } + + // Release semaphore after BOTH network download AND disk write complete for Part 1 + // This ensures ConcurrentServiceRequests controls the entire I/O operation, + // consistent with Parts 2+ (see CreateDownloadTaskAsync) + _httpConcurrencySlots.Release(); + _logger.DebugFormat("MultipartDownloadManager: [Part 1] HTTP concurrency slot released (Available: {0}/{1})", + _httpConcurrencySlots.CurrentCount, _config.ConcurrentServiceRequests); + } + } + + /// + /// Starts background downloads for remaining parts (Part 2+) in a multipart download. + /// Handles capacity acquisition, task creation, completion validation, and error handling.
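+ /// For example, in a 10-part download, Part 1 has already been processed inline by this point; this method queues Parts 2-10, bounded by buffer capacity and HTTP concurrency limits.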
+ /// + private async Task StartBackgroundDownloadsAsync(DownloadResult downloadResult, EventHandler<WriteObjectProgressArgs> wrappedCallback, CancellationTokenSource internalCts) + { + var downloadTasks = new List<Task>(); + + try + { + _logger.DebugFormat("MultipartDownloadManager: Background task starting capacity acquisition and downloads"); + + // Multipart: Start concurrent downloads for remaining parts (Part 2 onwards) + _logger.InfoFormat("MultipartDownloadManager: Starting concurrent downloads for parts 2-{0}", + downloadResult.TotalParts); + + // Create download tasks for all remaining parts + await CreateDownloadTasksAsync(downloadResult, wrappedCallback, internalCts, downloadTasks).ConfigureAwait(false); + + var expectedTaskCount = downloadTasks.Count; + _logger.DebugFormat("MultipartDownloadManager: Background task waiting for {0} download tasks", expectedTaskCount); + + // Wait for all downloads to complete (fails fast on first exception) + await TaskHelpers.WhenAllFailFastAsync(downloadTasks, internalCts.Token).ConfigureAwait(false); + + _logger.DebugFormat("MultipartDownloadManager: All download tasks completed successfully"); + + // Validate completion and mark successful + ValidateDownloadCompletion(expectedTaskCount, downloadResult.TotalParts); + + // Mark successful completion + _logger.InfoFormat("MultipartDownloadManager: Download completed successfully - TotalParts={0}", + downloadResult.TotalParts); + _dataHandler.OnDownloadComplete(null); + } + #pragma warning disable CA1031 // Do not catch general exception types + catch (Exception ex) + { + HandleDownloadError(ex, internalCts); + throw; + } + #pragma warning restore CA1031 // Do not catch general exception types + finally + { + // Dispose the CancellationTokenSource after all background operations complete + // This ensures the token remains valid for the entire lifetime of download tasks + internalCts.Dispose(); + } + } + + /// + /// Creates download tasks for all remaining parts (Part 2+) with sequential capacity acquisition. + /// Pre-acquires capacity in sequential order to prevent race condition deadlock. + /// + private async Task CreateDownloadTasksAsync(DownloadResult downloadResult, EventHandler<WriteObjectProgressArgs> wrappedCallback, CancellationTokenSource internalCts, List<Task> downloadTasks) + { + // Pre-acquire capacity in sequential order to prevent race condition deadlock + // This ensures Part 2 gets capacity before Part 3, etc., preventing out-of-order + // parts from consuming all buffer slots and blocking the next expected part + for (int partNum = 2; partNum <= downloadResult.TotalParts && !internalCts.IsCancellationRequested; partNum++) + { + _logger.DebugFormat("MultipartDownloadManager: [Part {0}] Waiting for buffer space", partNum); + + // Acquire capacity sequentially - guarantees Part 2 before Part 3, etc.
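+ // e.g., with MaxInMemoryParts = 3, letting Parts 3-5 grab all three buffer slots would leave the consumer + // stuck waiting on Part 2 while Part 2 waits on a slot; acquiring in part order rules that deadlock out.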
+ await _dataHandler.WaitForCapacityAsync(internalCts.Token).ConfigureAwait(false); + + // Check cancellation after acquiring capacity - a task may have failed while waiting + if (internalCts.IsCancellationRequested) + { + _logger.InfoFormat("MultipartDownloadManager: [Part {0}] Stopping early - cancellation requested after capacity acquired", partNum); + _dataHandler.ReleaseCapacity(); + break; + } + + _logger.DebugFormat("MultipartDownloadManager: [Part {0}] Buffer space acquired", partNum); + + _logger.DebugFormat("MultipartDownloadManager: [Part {0}] Waiting for HTTP concurrency slot (Available: {1}/{2})", + partNum, _httpConcurrencySlots.CurrentCount, _config.ConcurrentServiceRequests); + + // Acquire HTTP slot in the loop before creating task + // Loop will block here if all slots are in use + await _httpConcurrencySlots.WaitAsync(internalCts.Token).ConfigureAwait(false); + + // Check cancellation after acquiring HTTP slot - a task may have failed while waiting + if (internalCts.IsCancellationRequested) + { + _logger.InfoFormat("MultipartDownloadManager: [Part {0}] Stopping early - cancellation requested after HTTP slot acquired", partNum); + _httpConcurrencySlots.Release(); + _dataHandler.ReleaseCapacity(); + break; + } + + _logger.DebugFormat("MultipartDownloadManager: [Part {0}] HTTP concurrency slot acquired", partNum); + + try + { + var task = CreateDownloadTaskAsync(partNum, downloadResult.ObjectSize, wrappedCallback, internalCts.Token); + downloadTasks.Add(task); + } + catch (Exception ex) + { + // If task creation fails, release the HTTP slot we just acquired + _httpConcurrencySlots.Release(); + _dataHandler.ReleaseCapacity(); + _logger.DebugFormat("MultipartDownloadManager: [Part {0}] HTTP concurrency slot released due to task creation failure: {1}", partNum, ex); + throw; + } + } + + if (internalCts.IsCancellationRequested && downloadTasks.Count < downloadResult.TotalParts - 1) + { + _logger.InfoFormat("MultipartDownloadManager: Stopped queuing early at {0} parts due to cancellation", downloadTasks.Count); + } + } + + /// + /// Validates that the expected number of parts were downloaded for SEP compliance. + /// + private void ValidateDownloadCompletion(int expectedTaskCount, int totalParts) + { + // SEP Part GET Step 6 / Ranged GET Step 8: + // "validate that the total number of part GET requests sent matches with the expected PartsCount" + // Note: This should always be true if we reach this point, since WhenAllFailFastAsync + // ensures all tasks completed successfully (or threw on first failure). + // The check serves as a defensive assertion for SEP compliance. + // Note: expectedTaskCount + 1 accounts for Part 1 being buffered during discovery + if (expectedTaskCount + 1 != totalParts) + { + throw new InvalidOperationException( + $"Request count mismatch. Expected {totalParts} parts, " + + $"but sent {expectedTaskCount + 1} requests"); + } + } + + /// + /// Handles download errors by cancelling remaining downloads and notifying the data handler. 
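+ /// For example, if Part 7 fails, Parts 8+ are cancelled promptly rather than each running on until its own timeout.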
+ /// + private void HandleDownloadError(Exception ex, CancellationTokenSource internalCts) + { + // Cancel all remaining downloads immediately to prevent cascading timeout errors + // This ensures that when one part fails, other tasks stop gracefully instead of + // continuing until they hit their own timeout/cancellation errors + // Check if cancellation was already requested to avoid ObjectDisposedException + if (!internalCts.IsCancellationRequested) + { + try + { + internalCts.Cancel(); + _logger.DebugFormat("MultipartDownloadManager: Cancelled all in-flight downloads due to error"); + } + catch (ObjectDisposedException) + { + // CancellationTokenSource was already disposed, ignore + _logger.DebugFormat("MultipartDownloadManager: CancellationTokenSource already disposed during cancellation"); + } + } + + _dataHandler.OnDownloadComplete(ex); + } + + private async Task CreateDownloadTaskAsync(int partNumber, long objectSize, EventHandler progressCallback, CancellationToken cancellationToken) + { + GetObjectResponse response = null; + var ownsResponse = false; // Track if we still own the response + + try + { + // HTTP slot was already acquired in the for loop before this task was created + // We just need to use it and release it when done + + try + { + // Create strategy-specific request + GetObjectRequest getObjectRequest; + + if (_request.MultipartDownloadType == MultipartDownloadType.PART) + { + // PART strategy: Use part number from original upload + getObjectRequest = CreateGetObjectRequest(); + getObjectRequest.PartNumber = partNumber; + + // SEP Part GET Step 4: "The S3 Transfer Manager MUST also set IfMatch member + // for each request to the Etag value saved from Step 3" + getObjectRequest.EtagToMatch = _savedETag; + + _logger.DebugFormat("MultipartDownloadManager: [Part {0}] Sending GetObject request with PartNumber={1}, IfMatchPresent={2}", + partNumber, partNumber, !string.IsNullOrEmpty(_savedETag)); + } + else + { + // RANGE strategy: Use calculated byte range + var (startByte, endByte) = CalculatePartRange(partNumber, objectSize); + + getObjectRequest = CreateGetObjectRequest(); + getObjectRequest.ByteRange = new ByteRange(startByte, endByte); + + // SEP Ranged GET Step 6: "The S3 Transfer Manager MUST also set IfMatch member + // for each request to the value saved from Step 5" + getObjectRequest.EtagToMatch = _savedETag; + + _logger.DebugFormat("MultipartDownloadManager: [Part {0}] Sending GetObject request with ByteRange={1}-{2}, IfMatchPresent={3}", + partNumber, startByte, endByte, !string.IsNullOrEmpty(_savedETag)); + } + + response = await _s3Client.GetObjectAsync(getObjectRequest, cancellationToken).ConfigureAwait(false); + ownsResponse = true; // We now own the response + + // Attach progress callback to response if provided + if (progressCallback != null) + { + response.WriteObjectProgressEvent += progressCallback; + } + + _logger.DebugFormat("MultipartDownloadManager: [Part {0}] GetObject response received - ContentLength={1}", + partNumber, response.ContentLength); + + // SEP Part GET Step 5 / Ranged GET Step 7: Validate ContentRange matches request + ValidateContentRange(response, partNumber, objectSize); + + _logger.DebugFormat("MultipartDownloadManager: [Part {0}] ContentRange validation passed", partNumber); + + // Validate ETag consistency for SEP compliance + if (!string.IsNullOrEmpty(_savedETag) && !string.Equals(_savedETag, response.ETag, StringComparison.OrdinalIgnoreCase)) + { + _logger.Error(null, "MultipartDownloadManager: [Part {0}] ETag mismatch 
detected - object modified during download", partNumber); + throw new InvalidOperationException($"ETag mismatch detected for part {partNumber} - object may have been modified during download"); + } + + _logger.DebugFormat("MultipartDownloadManager: [Part {0}] ETag validation passed", partNumber); + + _logger.DebugFormat("MultipartDownloadManager: [Part {0}] Processing part (handler will decide: stream or buffer)", partNumber); + + // Delegate data handling to the handler + // IMPORTANT: Handler takes ownership of response and is responsible for disposing it in ALL cases: + // - If streaming: StreamingDataSource takes ownership and disposes when consumer finishes reading + // - If buffering: Handler disposes immediately after copying data to buffer + // - On error: Handler disposes in its catch block before rethrowing + await _dataHandler.ProcessPartAsync(partNumber, response, cancellationToken).ConfigureAwait(false); + ownsResponse = false; // Ownership transferred to handler + + _logger.DebugFormat("MultipartDownloadManager: [Part {0}] Processing completed successfully", partNumber); + } + finally + { + // Release semaphore after BOTH network download AND disk write complete + // Slot was acquired in the for loop before this task was created + _httpConcurrencySlots.Release(); + _logger.DebugFormat("MultipartDownloadManager: [Part {0}] HTTP concurrency slot released (Available: {1}/{2})", + partNumber, _httpConcurrencySlots.CurrentCount, _config.ConcurrentServiceRequests); + } + } + catch (Exception ex) + { + _logger.Error(ex, "MultipartDownloadManager: [Part {0}] Download failed", partNumber); + + // Dispose response if we still own it (error occurred before handler took ownership) + if (ownsResponse) + response?.Dispose(); + + // Release capacity on failure + _dataHandler.ReleaseCapacity(); + throw; + } + } + + + private async Task DiscoverUsingPartStrategyAsync(CancellationToken cancellationToken) + { + // Check for cancellation before making any S3 calls + cancellationToken.ThrowIfCancellationRequested(); + + // SEP Part GET Step 1: "create a new GetObject request copying all fields in DownloadRequest. + // Set partNumber to 1." + var firstPartRequest = CreateGetObjectRequest(); + firstPartRequest.PartNumber = 1; + + // Wait for both capacity types before making HTTP request (consistent with background parts) + _logger.DebugFormat("MultipartDownloadManager: [Part 1 Discovery] Waiting for buffer capacity"); + await _dataHandler.WaitForCapacityAsync(cancellationToken).ConfigureAwait(false); + + _logger.DebugFormat("MultipartDownloadManager: [Part 1 Discovery] Waiting for HTTP concurrency slot"); + await _httpConcurrencySlots.WaitAsync(cancellationToken).ConfigureAwait(false); + + GetObjectResponse firstPartResponse = null; + + // NOTE: Semaphore is NOT released here - it will be released in StartDownloadsAsync + // after Part 1 is processed. This ensures the semaphore controls both network download + // AND disk write for Part 1, consistent with Parts 2+ (see CreateDownloadTaskAsync) + + try + { + // SEP Part GET Step 2: "send the request and wait for the response in a non-blocking fashion" + firstPartResponse = await _s3Client.GetObjectAsync(firstPartRequest, cancellationToken).ConfigureAwait(false); + + if (firstPartResponse == null) + throw new InvalidOperationException("Failed to retrieve object from S3"); + + // SEP Part GET Step 3: Save ETag for later IfMatch validation in subsequent requests + _savedETag = firstPartResponse.ETag; + + // SEP Part GET Step 3: "check the response. 
First parse total content length from ContentRange + // of the GetObject response and save the value in a variable. The length is the numeric value + // after / delimiter. For example, given ContentRange=bytes 0-1/5, 5 is the total content length. + // Then check PartsCount." + if (firstPartResponse.PartsCount.HasValue && firstPartResponse.PartsCount.Value > 1) + { + // SEP Part GET Step 3: "If PartsCount in the response is larger than 1, it indicates there + // are more parts available to download. The S3 Transfer Manager MUST save etag from the + // response to a variable." + _discoveredPartCount = firstPartResponse.PartsCount.Value; + + // Parse total content length from ContentRange header + // For example, "bytes 0-5242879/52428800" -> extract 52428800 + var totalObjectSize = ExtractTotalSizeFromContentRange(firstPartResponse.ContentRange); + + // SEP Part GET Step 7 will use this response for creating DownloadResponse + // Keep the response with its stream (will be buffered in StartDownloadsAsync) + return new DownloadResult + { + TotalParts = firstPartResponse.PartsCount.Value, + ObjectSize = totalObjectSize, + InitialResponse = firstPartResponse // Keep response with stream + }; + } + else + { + // SEP Part GET Step 3: "If PartsCount is 1, go to Step 7." + _discoveredPartCount = 1; + + // Single part upload - return the response for immediate use (SEP Step 7) + return new DownloadResult + { + TotalParts = 1, + ObjectSize = firstPartResponse.ContentLength, + InitialResponse = firstPartResponse // Keep response with stream + }; + } + } + catch + { + // On error, release semaphore and dispose response before rethrowing + _httpConcurrencySlots.Release(); + firstPartResponse?.Dispose(); + throw; + } + } + + private async Task DiscoverUsingRangeStrategyAsync(CancellationToken cancellationToken) + { + // Check for cancellation before making any S3 calls + cancellationToken.ThrowIfCancellationRequested(); + + // Get target part size for RANGE strategy (already set in config from request or default) + var targetPartSize = _config.TargetPartSizeBytes; + + // SEP Ranged GET Step 1: "create a new GetObject request copying all fields in the original request. + // Set range value to bytes=0-{targetPartSizeBytes-1} to request the first part." + var firstRangeRequest = CreateGetObjectRequest(); + firstRangeRequest.ByteRange = new ByteRange(0, targetPartSize - 1); + + // Wait for both capacity types before making HTTP request (consistent with background parts) + _logger.DebugFormat("MultipartDownloadManager: [Part 1 Discovery] Waiting for buffer capacity"); + await _dataHandler.WaitForCapacityAsync(cancellationToken).ConfigureAwait(false); + + _logger.DebugFormat("MultipartDownloadManager: [Part 1 Discovery] Waiting for HTTP concurrency slot"); + await _httpConcurrencySlots.WaitAsync(cancellationToken).ConfigureAwait(false); + + GetObjectResponse firstRangeResponse = null; + + // NOTE: Semaphore is NOT released here - it will be released in StartDownloadsAsync + // after Part 1 is processed. 
This ensures the semaphore controls both network download + // AND disk write for Part 1, consistent with Parts 2+ (see CreateDownloadTaskAsync) + + try + { + // SEP Ranged GET Step 2: "send the request and wait for the response in a non-blocking fashion" + firstRangeResponse = await _s3Client.GetObjectAsync(firstRangeRequest, cancellationToken).ConfigureAwait(false); + + // Defensive null check + if (firstRangeResponse == null) + throw new InvalidOperationException("Failed to retrieve object from S3"); + + // SEP Ranged GET Step 5: "save Etag from the response to a variable" + // (for IfMatch validation in subsequent requests) + _savedETag = firstRangeResponse.ETag; + + // SEP Ranged GET Step 3: "parse total content length from ContentRange of the GetObject response + // and save the value in a variable. The length is the numeric value after / delimiter. + // For example, given ContentRange=bytes0-1/5, 5 is the total content length." + // Check if ContentRange is null (object smaller than requested range) + if (firstRangeResponse.ContentRange == null) + { + // No ContentRange means we got the entire small object + _discoveredPartCount = 1; + + return new DownloadResult + { + TotalParts = 1, + ObjectSize = firstRangeResponse.ContentLength, + InitialResponse = firstRangeResponse // Keep response with stream + }; + } + + + // Parse total object size from ContentRange (e.g., "bytes 0-5242879/52428800" -> 52428800) + var totalContentLength = ExtractTotalSizeFromContentRange(firstRangeResponse.ContentRange); + + // SEP Ranged GET Step 4: "compare the parsed total content length from Step 3 with ContentLength + // of the response. If the parsed total content length equals to the value from ContentLength, + // it indicates this request contains all of the data. The request is finished, return the response." + if (totalContentLength == firstRangeResponse.ContentLength) + { + // Single part: total size equals returned ContentLength + // This request contains all of the data + _discoveredPartCount = 1; + + return new DownloadResult + { + TotalParts = 1, + ObjectSize = totalContentLength, + InitialResponse = firstRangeResponse // Keep response with stream + }; + } + + // SEP Ranged GET Step 4: "If they do not match, it indicates there are more parts available + // to download. Add a validation to verify that ContentLength equals to the targetPartSizeBytes." + if (firstRangeResponse.ContentLength != targetPartSize) + { + throw new InvalidOperationException( + $"Expected first part size {targetPartSize} bytes, but received {firstRangeResponse.ContentLength} bytes. " + + $"Total object size is {totalContentLength} bytes."); + } + + // SEP Ranged GET Step 5: "calculate number of requests required by performing integer division + // of total contentLength/targetPartSizeBytes. Save the number of ranged GET requests in a variable." 
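+ // e.g., totalContentLength = 52428800 with targetPartSize = 5242880 yields exactly 10 parts, while a + // 52428801-byte object rounds up to 11; Math.Ceiling covers the short final part that plain integer division would drop.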
+ _discoveredPartCount = (int)Math.Ceiling((double)totalContentLength / targetPartSize); + + // SEP Ranged GET Step 9 will use this response for creating DownloadResponse + // Keep the response with its stream (will be buffered in StartDownloadsAsync) + return new DownloadResult + { + TotalParts = _discoveredPartCount, + ObjectSize = totalContentLength, + InitialResponse = firstRangeResponse // Keep response with stream + }; + } + catch + { + // On error, release semaphore and dispose response before rethrowing + _httpConcurrencySlots.Release(); + firstRangeResponse?.Dispose(); + throw; + } + } + + private GetObjectRequest CreateGetObjectRequest() + { + var request = RequestMapper.MapToGetObjectRequest(_request); + + // Attach user agent handler if provided + if (_requestEventHandler != null) + { + ((Amazon.Runtime.Internal.IAmazonWebServiceRequest)request) + .AddBeforeRequestHandler(_requestEventHandler); + } + + return request; + } + + internal (long startByte, long endByte) CalculatePartRange(int partNumber, long objectSize) + { + var targetPartSize = _config.TargetPartSizeBytes; + + var startByte = (partNumber - 1) * targetPartSize; + var endByte = Math.Min(startByte + targetPartSize - 1, objectSize - 1); + return (startByte, endByte); + } + + internal (long startByte, long endByte, long totalSize) ParseContentRange(string contentRange) + { + // Delegate to centralized ContentRange parsing utility + return ContentRangeParser.Parse(contentRange); + } + + internal long ExtractTotalSizeFromContentRange(string contentRange) + { + // Delegate to centralized ContentRange parsing utility + return ContentRangeParser.GetTotalSize(contentRange); + } + + internal void ValidateContentRange(GetObjectResponse response, int partNumber, long objectSize) + { + // Ranged GET Step 7: + // "validate that ContentRange matches with the requested range" + if (_request.MultipartDownloadType == MultipartDownloadType.RANGE) + { + var (expectedStartByte, expectedEndByte) = CalculatePartRange(partNumber, objectSize); + + // Parse actual ContentRange from response using unified helper + if (string.IsNullOrEmpty(response.ContentRange)) + { + throw new InvalidOperationException($"ContentRange header missing from part {partNumber} response"); + } + + var (actualStartByte, actualEndByte, _) = ParseContentRange(response.ContentRange); + + // Validate range matches what we requested + if (actualStartByte != expectedStartByte || actualEndByte != expectedEndByte) + { + throw new InvalidOperationException( + $"ContentRange mismatch for part {partNumber}. " + + $"Expected: bytes {expectedStartByte}-{expectedEndByte}, " + + $"Actual: bytes {actualStartByte}-{actualEndByte}"); + } + } + } + + /// + /// Creates progress args with aggregated values for multipart downloads. + /// + private WriteObjectProgressArgs CreateProgressArgs(long incrementTransferred, long transferredBytes, bool completed = false) + { + string filePath = (_request as TransferUtilityDownloadRequest)?.FilePath; + + return new WriteObjectProgressArgs( + _request.BucketName, + _request.Key, + filePath, + _request.VersionId, + incrementTransferred, + transferredBytes, + _totalObjectSize, + completed + ); + } + + /// + /// Progress aggregation callback that combines progress across all concurrent part downloads. + /// Uses thread-safe counter increment to handle concurrent updates. + /// Detects completion naturally when transferred bytes reaches total size. 
+ /// Uses atomic flag to ensure completion event fires exactly once and prevents any events after completion. + /// + private void DownloadPartProgressEventCallback(object sender, WriteObjectProgressArgs e) + { + long transferredBytes = Interlocked.Add(ref _totalTransferredBytes, e.IncrementTransferred); + + // Check if completion was already fired - if so, skip this event entirely + // This prevents the race condition where per-part completion events arrive after + // the aggregated completion event has already been fired + if (Interlocked.Read(ref _completionEventFired) == 1) + { + return; // Already completed, don't fire any more events + } + + // Use atomic CompareExchange to ensure only first thread fires completion + bool isComplete = false; + if (transferredBytes == _totalObjectSize) + { + // CompareExchange returns the original value before the exchange + // If original value was 0 (false), we're the first thread and should fire completion + long originalValue = Interlocked.CompareExchange(ref _completionEventFired, 1, 0); + if (originalValue == 0) // Was false, now set to true + { + isComplete = true; + } + else + { + // Another thread already fired completion, skip this event + return; + } + } + + // Create and fire aggregated progress event + // Only reached if completion hasn't been fired yet + var aggregatedArgs = CreateProgressArgs(e.IncrementTransferred, transferredBytes, isComplete); + _userProgressCallback?.Invoke(this, aggregatedArgs); + } + + private void ThrowIfDisposed() + { + if (_disposed) + throw new ObjectDisposedException(nameof(MultipartDownloadManager)); + } + + #region Dispose Pattern + + /// + [SuppressMessage("Design", "CA1031:Do not catch general exception types", Justification = "Dispose methods should not throw exceptions")] + public void Dispose() + { + if (!_disposed) + { + try + { + // Only dispose HTTP throttler if we own it + if (_ownsHttpThrottler) + { + _httpConcurrencySlots?.Dispose(); + } + _dataHandler?.Dispose(); + } + catch (Exception) + { + // Suppressing CA1031: Dispose methods should not throw exceptions + // Continue disposal process silently on any errors + } + + _disposed = true; + } + } + + #endregion + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartUploadCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartUploadCommand.cs index 3b9532793578..c9d7041aa987 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartUploadCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartUploadCommand.cs @@ -37,7 +37,7 @@ namespace Amazon.S3.Transfer.Internal /// /// The command to manage an upload using the S3 multipart API. /// - internal partial class MultipartUploadCommand : BaseCommand + internal partial class MultipartUploadCommand : BaseCommand<TransferUtilityUploadResponse> { IAmazonS3 _s3Client; long _partSize; @@ -50,13 +50,7 @@ internal partial class MultipartUploadCommand : BaseCommand Queue _partsToUpload = new Queue(); long _contentLength; - private static Logger Logger - { - get - { - return Logger.GetLogger(typeof(TransferUtility)); - } - } + private readonly Logger _logger = Logger.GetLogger(typeof(MultipartUploadCommand)); /// /// Initializes a new instance of the class.
@@ -70,11 +64,11 @@ internal MultipartUploadCommand(IAmazonS3 s3Client, TransferUtilityConfig config if (fileTransporterRequest.IsSetFilePath()) { - Logger.DebugFormat("Beginning upload of file {0}.", fileTransporterRequest.FilePath); + _logger.DebugFormat("Beginning upload of file {0}.", fileTransporterRequest.FilePath); } else { - Logger.DebugFormat("Beginning upload of stream."); + _logger.DebugFormat("Beginning upload of stream."); } this._s3Client = s3Client; @@ -95,7 +89,7 @@ internal MultipartUploadCommand(IAmazonS3 s3Client, TransferUtilityConfig config } } - Logger.DebugFormat("Upload part size {0}.", this._partSize); + _logger.DebugFormat("Upload part size {0}.", this._partSize); } private static long calculatePartSize(long contentLength, long targetPartSize) @@ -387,10 +381,46 @@ private void UploadPartProgressEventCallback(object sender, UploadProgressArgs e long transferredBytes = Interlocked.Add(ref _totalTransferredBytes, e.IncrementTransferred - e.CompensationForRetry); var progressArgs = new UploadProgressArgs(e.IncrementTransferred, transferredBytes, this._contentLength, - e.CompensationForRetry, this._fileTransporterRequest.FilePath); + e.CompensationForRetry, this._fileTransporterRequest.FilePath, this._fileTransporterRequest); this._fileTransporterRequest.OnRaiseProgressEvent(progressArgs); } + private void FireTransferInitiatedEvent() + { + var initiatedArgs = new UploadInitiatedEventArgs( + request: _fileTransporterRequest, + totalBytes: _contentLength, + filePath: _fileTransporterRequest.FilePath + ); + + _fileTransporterRequest.OnRaiseTransferInitiatedEvent(initiatedArgs); + } + + private void FireTransferCompletedEvent(TransferUtilityUploadResponse response) + { + var completedArgs = new UploadCompletedEventArgs( + request: _fileTransporterRequest, + filePath: _fileTransporterRequest.FilePath, + response: response, + transferredBytes: Interlocked.Read(ref _totalTransferredBytes), + totalBytes: _contentLength + ); + + _fileTransporterRequest.OnRaiseTransferCompletedEvent(completedArgs); + } + + private void FireTransferFailedEvent() + { + var failedArgs = new UploadFailedEventArgs( + request: _fileTransporterRequest, + filePath: _fileTransporterRequest.FilePath, + transferredBytes: Interlocked.Read(ref _totalTransferredBytes), + totalBytes: _contentLength + ); + + _fileTransporterRequest.OnRaiseTransferFailedEvent(failedArgs); + } + /// /// /// If a checksum algorithm was not specified, we MUST add the default value used by the SDK (as the individual part diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/OpenStreamCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/OpenStreamCommand.cs index 57eab52d3f98..0fdfc64bcbae 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/OpenStreamCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/OpenStreamCommand.cs @@ -29,7 +29,7 @@ namespace Amazon.S3.Transfer.Internal { - internal partial class OpenStreamCommand : BaseCommand + internal partial class OpenStreamCommand : BaseCommand { IAmazonS3 _s3Client; TransferUtilityOpenStreamRequest _request; @@ -59,10 +59,5 @@ internal Stream ResponseStream { get { return this._responseStream; } } - - public override object Return - { - get { return this.ResponseStream; } - } } } diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/OpenStreamWithResponseCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/OpenStreamWithResponseCommand.cs new file mode 100644 index 000000000000..7d5b8258c2f8 --- /dev/null +++ 
b/sdk/src/Services/S3/Custom/Transfer/Internal/OpenStreamWithResponseCommand.cs @@ -0,0 +1,48 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ +using System; +using System.Collections.Generic; +using System.IO; +using System.Text; + +using Amazon.S3; +using Amazon.S3.Model; + +namespace Amazon.S3.Transfer.Internal +{ + /// + /// Enhanced OpenStream command that uses BufferedMultipartStream for improved multipart download handling. + /// + internal partial class OpenStreamWithResponseCommand : BaseCommand + { + IAmazonS3 _s3Client; + TransferUtilityOpenStreamRequest _request; + TransferUtilityConfig _config; + + internal OpenStreamWithResponseCommand(IAmazonS3 s3Client, TransferUtilityOpenStreamRequest request, TransferUtilityConfig config) + { + this._s3Client = s3Client; + this._request = request; + this._config = config; + } + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/PartBufferManager.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/PartBufferManager.cs new file mode 100644 index 000000000000..33edf2fa0ad1 --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/PartBufferManager.cs @@ -0,0 +1,626 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ +using Amazon.Runtime.Internal.Util; +using System; +using System.Collections.Concurrent; +using System.Diagnostics.CodeAnalysis; +using System.Threading; +using System.Threading.Tasks; + +namespace Amazon.S3.Transfer.Internal +{ + /// + /// Manages buffered parts for multipart downloads with memory flow control and sequential consumption. + /// Implements a producer-consumer pattern where download tasks produce buffered parts and the read stream consumes them in order. + /// + /// + /// This class coordinates concurrent multipart downloads while ensuring sequential reading and bounded memory usage. + /// + /// SYNCHRONIZATION PRIMITIVES AND THEIR PURPOSES: + /// + /// 1. 
_nextExpectedPartNumber (volatile int) + /// - Purpose: Tracks which part to read next, ensuring sequential consumption + /// - Synchronization: volatile keyword for memory visibility across threads + /// - Readers: Producer threads (download tasks) check if their part matches to decide stream-vs-buffer + /// - Writer: Consumer thread (single) increments after consuming each part + /// - Thread safety: volatile ensures producer threads see latest value (prevents stale cached reads) + /// + /// 2. _completionState (volatile Tuple of bool and Exception) + /// - Purpose: Atomically tracks download completion status and any error + /// - Synchronization: volatile keyword + atomic reference assignment + /// - Why combined: _downloadComplete and _downloadException must be read together + /// consistently. Tuple reference assignment is atomic in .NET (prevents partial reads). + /// - Reads: Direct volatile read gets both values atomically + /// - Writes: Simple assignment is atomic for references, volatile ensures visibility + /// + /// 3. _bufferSpaceAvailable (slot counter) + /// - Purpose: Flow control to limit memory usage by limiting concurrent buffered parts + /// - Capacity: Set to MaxInMemoryParts (e.g., 10 parts) + /// - Example: If 10 parts are buffered in memory and part 1 is still being read, a download + /// task attempting to buffer part 11 must wait. Once part 1 is consumed and disposed, + /// its buffer slot is released, allowing part 11 to be buffered. + /// - Critical: Prevents unbounded memory growth during large multipart downloads + /// + /// 4. _partAvailable (signal for readers) + /// - Purpose: Signals when new parts are added or download completes + /// - Signaled by: AddBufferAsync (when new part added), MarkDownloadComplete (when done) + /// - Waited on by: ReadFromCurrentPartAsync (when expected part not yet available) + /// - Example: Reader waits for part 3. When a download task adds part 3 to the dictionary, + /// it signals this event, waking the waiting reader to proceed + /// - Automatically resets after waking one waiting reader + /// + /// 5. _partDataSources (dictionary storing parts) + /// - Purpose: Thread-safe storage of buffered part data indexed by part number + /// - Key: Part number (allows quickly finding the next part to read) + /// - Example: Download tasks 1-10 run concurrently, each adding their buffered part to the + /// dictionary when ready. The reader sequentially consumes part 1, then 2, then 3, etc., + /// even if they arrived in a different order (e.g., 3, 1, 5, 2, 4). + /// + /// PRODUCER-CONSUMER FLOW: + /// + /// Producer Flow (Download Tasks buffering parts): + /// 1. Wait for buffer space: await WaitForBufferSpaceAsync + /// - Blocks if MaxInMemoryParts parts are already buffered in memory + /// - Example: With MaxInMemoryParts=10, if parts 5-14 are buffered, the task downloading + /// part 15 blocks here until the reader consumes and releases part 5's buffer + /// 2. Read part data from S3 into pooled buffer + /// 3. Add buffered part: await AddBufferAsync + /// - Adds buffer to _partDataSources dictionary + /// - Signals _partAvailable to wake consumer if waiting + /// 4. Consumer eventually releases the buffer slot after reading the part + /// + /// Consumer Flow (Read Stream reading parts sequentially): + /// 1. Check if expected part (_nextExpectedPartNumber) is available in dictionary + /// 2. If not available, wait on _partAvailable event + /// - Example: Waiting for part 2, even if parts 3, 5, 7 are already available + /// - Also checks for download completion while waiting to detect end-of-file + /// 3. 
Once available, read from the part's buffer sequentially + /// 4. When part is fully read (IsComplete = true): + /// - Remove part from dictionary + /// - Dispose data source (returns buffer to ArrayPool) + /// - Call ReleaseBufferSpace() (frees slot for producer to buffer next part) + /// - Increment _nextExpectedPartNumber (simple increment, no synchronization needed) + /// 5. Continue to next part to fill caller's buffer across part boundaries if needed + /// + /// SEQUENTIAL GUARANTEE: + /// The _nextExpectedPartNumber field ensures parts are consumed in order, even when they + /// arrive out of order. The consumer always waits for the next sequential part before + /// reading, regardless of what other parts are already buffered. + /// + /// Example scenario with 5-part download: + /// - Download order: Part 3 arrives, then 1, then 5, then 2, then 4 + /// - Parts in dictionary: {3, 1, 5} then {3, 1, 5, 2} then {3, 1, 5, 2, 4} + /// - Reader consumption order: Waits for 1, reads 1, advances to 2, waits for 2, reads 2, + /// advances to 3, reads 3 (already available), advances to 4, waits for 4, etc. + /// - Final read order: 1, 2, 3, 4, 5 (sequential, regardless of arrival order) + /// + /// MEMORY MANAGEMENT: + /// This bounded buffer approach prevents memory exhaustion on large files: + /// - Without flow control: All parts could be buffered simultaneously (e.g., 1000 parts × 10MB = 10GB) + /// - With flow control (MaxInMemoryParts=10): Maximum 10 parts buffered (10 × 10MB = 100MB) + /// - The semaphore creates backpressure on download tasks when memory limit is reached + /// + internal class PartBufferManager : IPartBufferManager + { + #region Private members + + // Stores buffered parts by their part number so we can quickly find them. + // Example: If parts arrive as 3, 1, 5, they're stored as {3: buffer3, 1: buffer1, 5: buffer5} + // but consumed in order: 1, 2 (wait), 3, 4 (wait), 5. + private readonly ConcurrentDictionary<int, IPartDataSource> _partDataSources; + + // Limits how many parts can be buffered in memory at once. + // Capacity set to MaxInMemoryParts (e.g., 10 parts). Download tasks wait here + // before buffering new parts if the limit is reached. Consumers release slots + // after disposing consumed part buffers. + // Example: With limit=10, if parts 1-10 are buffered and part 1 is being read, + // the download task for part 11 blocks here. Once part 1 is consumed and its + // buffer returned to the pool via ReleaseBufferSpace(), part 11 can be buffered. + private readonly SemaphoreSlim _bufferSpaceAvailable; + + // Signals when new parts are added or download completes. + // Automatically resets after waking one waiting reader. + // Signaled by: AddDataSource when new part added, MarkDownloadComplete when finished. + // Waited on by: ReadFromCurrentPartAsync when expected part not yet available. + // Example: Reader waits for part 4. When download task adds part 4, it signals + // this event, immediately waking the reader to proceed with consumption. + private readonly AutoResetEvent _partAvailable; + + // Tracks the next part number to consume sequentially. Ensures in-order reading. + // SYNCHRONIZATION: volatile keyword for memory visibility + // - Consumer thread writes: Increments after fully consuming each part + // - Producer threads read: Check if their part matches to decide stream-vs-buffer + // - volatile ensures all threads see the most recent value (prevents stale cached reads) + // + // Example: Set to 1 initially. After reading part 1, incremented to 2.
+        // Stores download completion status and any error as an atomic unit.
+        // SYNCHRONIZATION: volatile keyword + atomic reference assignment
+        // Item1: bool indicating if the download is complete
+        // Item2: Exception if the download failed, null if successful
+        //
+        // Why a Tuple instead of separate fields:
+        // - Reference assignment is atomic in .NET (prevents partial reads)
+        // - volatile ensures all threads see the latest instance
+        // - Reading the tuple gives us both values consistently in a single atomic operation
+        // - No race condition where we read complete equals true but the exception has not been set yet
+        //
+        // Usage:
+        // Read: var state = _completionState; if (state.Item1) then check state.Item2 for error
+        // Write: _completionState = Tuple.Create(true, exception);
+        private volatile Tuple<bool, Exception> _completionState = Tuple.Create(false, (Exception)null);
+
+        private bool _disposed = false;
+
+        #endregion
+
+        #region Logger
+
+        private Logger Logger
+        {
+            get
+            {
+                return Logger.GetLogger(typeof(TransferUtility));
+            }
+        }
+
+        #endregion
+
+        /// Initializes a new instance of the PartBufferManager class.
+        /// config: The BufferedDownloadConfiguration with buffer management settings.
+        /// ArgumentNullException: Thrown when config is null.
+        public PartBufferManager(BufferedDownloadConfiguration config)
+        {
+            if (config == null)
+                throw new ArgumentNullException(nameof(config));
+
+            _partDataSources = new ConcurrentDictionary<int, IPartDataSource>();
+            _bufferSpaceAvailable = new SemaphoreSlim(
+                config.MaxInMemoryParts, // initialCount
+                config.MaxInMemoryParts  // maxCount - prevents exceeding configured limit
+            );
+            _partAvailable = new AutoResetEvent(false);
+
+            Logger.DebugFormat("PartBufferManager initialized with MaxInMemoryParts={0}", config.MaxInMemoryParts);
+        }
+
+        /// <inheritdoc/>
+        public int NextExpectedPartNumber
+        {
+            get
+            {
+                // Volatile read - producer threads read this while the consumer (the sole writer)
+                // advances it. No extra synchronization needed: int reads are naturally atomic
+                // and the volatile field guarantees visibility of the latest increment.
+                return _nextExpectedPartNumber;
+            }
+        }
+
+        /// <inheritdoc/>
+        ///
+        /// This method is called by download tasks before buffering a new part. If MaxInMemoryParts
+        /// parts are already buffered, the task blocks here until the consumer reads and disposes a part,
+        /// freeing a slot via ReleaseBufferSpace.
+        ///
+        /// Example: With MaxInMemoryParts=10:
+        /// - Parts 1-10 are buffered in memory
+        /// - Download task for part 11 calls this method and blocks
+        /// - Consumer reads and completes part 1, calls ReleaseBufferSpace
+        /// - This method returns, allowing part 11 to be buffered
+        ///
+        public async Task WaitForBufferSpaceAsync(CancellationToken cancellationToken)
+        {
+            ThrowIfDisposed();
+
+            var availableBefore = _bufferSpaceAvailable.CurrentCount;
+            Logger.DebugFormat("PartBufferManager: Waiting for buffer space (Available slots before wait: {0})", availableBefore);
+
+            await _bufferSpaceAvailable.WaitAsync(cancellationToken).ConfigureAwait(false);
+
+            var availableAfter = _bufferSpaceAvailable.CurrentCount;
+            Logger.DebugFormat("PartBufferManager: Buffer space acquired (Available slots after acquire: {0})", availableAfter);
+        }
+
+        /// Adds a part data source to the dictionary and signals waiting consumers.
+        /// dataSource: The IPartDataSource to add.
+        /// ArgumentNullException: Thrown when dataSource is null.
+        /// InvalidOperationException: Thrown when attempting to add a duplicate part number.
+        ///
+        /// This method is thread-safe and can be called concurrently by multiple download tasks.
+        /// After adding the part to the dictionary, it signals _partAvailable to wake any consumer
+        /// waiting for this specific part number.
+        ///
+        /// Example: Download tasks for parts 3, 1, 5 all call this concurrently:
+        /// - Each adds to the dictionary with their part number as key
+        /// - Each signals _partAvailable
+        /// - Consumer waiting for part 1 wakes up when part 1 is added
+        ///
+        public void AddDataSource(IPartDataSource dataSource)
+        {
+            ThrowIfDisposed();
+
+            if (dataSource == null)
+                throw new ArgumentNullException(nameof(dataSource));
+
+            Logger.DebugFormat("PartBufferManager: Adding part {0} (BufferedParts count before add: {1})",
+                dataSource.PartNumber, _partDataSources.Count);
+
+            // Add the data source to the collection
+            if (!_partDataSources.TryAdd(dataSource.PartNumber, dataSource))
+            {
+                // Duplicate part number - this shouldn't happen in normal operation
+                Logger.Error(null, "PartBufferManager: Duplicate part {0} attempted to be added", dataSource.PartNumber);
+                dataSource?.Dispose(); // Clean up the duplicate part
+                throw new InvalidOperationException($"Duplicate part {dataSource.PartNumber} attempted to be added");
+            }
+
+            Logger.DebugFormat("PartBufferManager: Part {0} added successfully (BufferedParts count after add: {1}). Signaling _partAvailable.",
+                dataSource.PartNumber, _partDataSources.Count);
+
+            // Signal that a new part is available
+            _partAvailable.Set();
+        }
+
+        /// <inheritdoc/>
+        public void AddBuffer(StreamPartBuffer buffer)
+        {
+            ThrowIfDisposed();
+
+            if (buffer == null)
+                throw new ArgumentNullException(nameof(buffer));
+
+            // Create a BufferedDataSource and add it
+            var bufferedSource = new BufferedDataSource(buffer);
+            AddDataSource(bufferedSource);
+        }
+
+        /// <inheritdoc/>
+        public void AddBuffer(IPartDataSource dataSource)
+        {
+            ThrowIfDisposed();
+
+            if (dataSource == null)
+                throw new ArgumentNullException(nameof(dataSource));
+
+            // Delegate directly to AddDataSource, which already handles IPartDataSource
+            AddDataSource(dataSource);
+        }
+
+        /// <inheritdoc/>
+        public async Task<int> ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken)
+        {
+            ThrowIfDisposed();
+
+            if (buffer == null)
+                throw new ArgumentNullException(nameof(buffer));
+            if (offset < 0)
+                throw new ArgumentOutOfRangeException(nameof(offset), "Offset must be non-negative");
+            if (count < 0)
+                throw new ArgumentOutOfRangeException(nameof(count), "Count must be non-negative");
+            if (offset + count > buffer.Length)
+                throw new ArgumentException("Offset and count exceed buffer bounds");
+
+            int totalBytesRead = 0;
+
+            // Keep reading until the buffer is full or we reach true EOF.
+            // Note: We read across part boundaries to fill the buffer completely, matching standard Stream.Read() behavior
+            while (totalBytesRead < count)
+            {
+                var (bytesRead, shouldContinue) = await ReadFromCurrentPartAsync(
+                    buffer,
+                    offset + totalBytesRead,
+                    count - totalBytesRead,
+                    cancellationToken).ConfigureAwait(false);
+
+                totalBytesRead += bytesRead;
+
+                if (!shouldContinue)
+                    break;
+            }
+
+            return totalBytesRead;
+        }
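+
+        // Illustrative consumer-side call pattern (hypothetical; the actual caller is the
+        // buffered multipart read stream, which repeatedly drains the manager):
+        //
+        //     var buffer = new byte[81920];
+        //     int read;
+        //     while ((read = await bufferManager.ReadAsync(buffer, 0, buffer.Length, ct)) > 0)
+        //         await destination.WriteAsync(buffer, 0, read, ct);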
+        /// Reads from the current expected part, handling availability, consumption, and cleanup.
+        /// Returns (bytesRead, shouldContinue) where shouldContinue indicates if more data is available.
+        ///
+        /// This method implements the core sequential consumption logic with these responsibilities:
+        /// - Waiting for the next expected part to arrive (even if later parts are already available)
+        /// - Reading data from the part's buffer
+        /// - Cleaning up completed parts (disposing the buffer, releasing the slot, advancing the counter)
+        /// - Detecting download completion and errors
+        ///
+        /// SEQUENTIAL CONSUMPTION EXAMPLE:
+        /// Scenario: Downloading a 5-part file, parts arrive out of order
+        ///
+        /// Initial state: _nextExpectedPartNumber = 1, dictionary is empty
+        ///
+        /// Step 1: Part 3 arrives first
+        /// - Dictionary: {3: buffer3}
+        /// - Consumer calls this method, expects part 1
+        /// - ContainsKey(1) = false, enters wait loop
+        /// - Waits on _partAvailable event
+        ///
+        /// Step 2: Part 1 arrives
+        /// - Dictionary: {3: buffer3, 1: buffer1}
+        /// - AddDataSource signals _partAvailable
+        /// - Consumer wakes up, checks ContainsKey(1) = true, exits wait loop
+        /// - Reads from part 1's buffer
+        /// - Part 1 becomes complete (IsComplete = true)
+        /// - Removes part 1 from dictionary: {3: buffer3}
+        /// - Disposes buffer (returns to ArrayPool)
+        /// - Releases buffer slot (ReleaseBufferSpace)
+        /// - Increments counter: _nextExpectedPartNumber = 2
+        /// - Returns (bytesRead, shouldContinue=true) to fill more of the caller's buffer
+        ///
+        /// Step 3: Next iteration, now expecting part 2
+        /// - Dictionary: {3: buffer3}
+        /// - ContainsKey(2) = false, enters wait loop again
+        /// - Waits for part 2, even though part 3 is already available
+        ///
+        /// This continues until all parts are consumed in order: 1, 2, 3, 4, 5
+        ///
+        private async Task<(int bytesRead, bool shouldContinue)> ReadFromCurrentPartAsync(
+            byte[] buffer,
+            int offset,
+            int count,
+            CancellationToken cancellationToken)
+        {
+            var currentPartNumber = _nextExpectedPartNumber;
+
+            Logger.DebugFormat("PartBufferManager.ReadFromCurrentPart: Expecting part {0} (Requested bytes: {1}, BufferedParts count: {2})",
+                currentPartNumber, count, _partDataSources.Count);
+
+            // Wait for the current part to become available.
+            // This loop handles out-of-order part arrival - we always wait for the next
+            // sequential part (_nextExpectedPartNumber) before proceeding, even if later
+            // parts are already available in the dictionary.
+            // Example: If parts 3, 5, 7 are available but we need part 2, we wait here.
+            while (!_partDataSources.ContainsKey(currentPartNumber))
+            {
+                Logger.DebugFormat("PartBufferManager.ReadFromCurrentPart: Part {0} not yet available. Waiting on _partAvailable event...",
+                    currentPartNumber);
+
+                // Check for completion first to avoid indefinite waiting.
+                var state = _completionState;
+                if (state.Item1) // Check if download is complete
+                {
+                    if (state.Item2 != null) // Check for exception
+                    {
+                        Logger.Error(state.Item2, "PartBufferManager.ReadFromCurrentPart: Download failed while waiting for part {0}",
+                            currentPartNumber);
+                        throw new InvalidOperationException("Multipart download failed", state.Item2);
+                    }
+
+                    Logger.DebugFormat("PartBufferManager.ReadFromCurrentPart: Download complete, part {0} not available. Returning EOF.",
+                        currentPartNumber);
+                    // True EOF - all parts downloaded, no more data coming
+                    return (0, false);
+                }
+
+                // Wait for a part to become available.
+                // _partAvailable is signaled by:
+                // 1. AddDataSource when a new part is added to the dictionary
+                // 2. MarkDownloadComplete when all download tasks finish
+                //
+                // Example: Waiting for part 2. When the download task finishes buffering part 2
+                // and calls AddDataSource, it signals this event, waking us to check again.
+                await Task.Run(() => _partAvailable.WaitOne(), cancellationToken).ConfigureAwait(false);
+
+                Logger.DebugFormat("PartBufferManager.ReadFromCurrentPart: Woke from _partAvailable wait. Rechecking for part {0}...",
+                    currentPartNumber);
+            }
+
+            Logger.DebugFormat("PartBufferManager.ReadFromCurrentPart: Part {0} is available. Reading from data source...",
+                currentPartNumber);
+
+            // At this point, the expected part is available in the dictionary.
+            // Double-check with TryGetValue for safety (handles rare race conditions).
+            if (!_partDataSources.TryGetValue(currentPartNumber, out var dataSource))
+            {
+                // Log technical details for troubleshooting
+                Logger.Error(null, "PartBufferManager: Part {0} disappeared after availability check. This indicates a race condition in the buffer manager.", currentPartNumber);
+
+                // Throw user-friendly exception
+                throw new InvalidOperationException("Multipart download failed due to an internal error.");
+            }
+
+            try
+            {
+                // Read from this part's buffer into the caller's buffer.
+                var partBytesRead = await dataSource.ReadAsync(buffer, offset, count, cancellationToken).ConfigureAwait(false);
+
+                Logger.DebugFormat("PartBufferManager.ReadFromCurrentPart: Read {0} bytes from part {1}. IsComplete={2}",
+                    partBytesRead, currentPartNumber, dataSource.IsComplete);
+
+                // If this part is fully consumed, perform cleanup and advance to the next part.
+                if (dataSource.IsComplete)
+                {
+                    Logger.DebugFormat("PartBufferManager.ReadFromCurrentPart: Part {0} is complete. Cleaning up and advancing to next part...",
+                        currentPartNumber);
+
+                    // Remove from collection
+                    _partDataSources.TryRemove(currentPartNumber, out _);
+
+                    // Clean up the data source (returns buffer to ArrayPool)
+                    dataSource.Dispose();
+
+                    // Release the buffer space slot (allows a producer to buffer the next part).
+                    // This is critical for flow control - without this release, download tasks
+                    // would eventually block waiting for space, even though we've consumed this part.
+                    // Example: After consuming part 1 (freeing its slot), a download task can now
+                    // buffer part 11 if parts 2-10 are still being held.
+                    ReleaseBufferSpace();
+
+                    // Advance to the next part.
+                    _nextExpectedPartNumber++;
+
+                    Logger.DebugFormat("PartBufferManager.ReadFromCurrentPart: Cleaned up part {0}. Next expected part: {1} (BufferedParts count: {2})",
+                        currentPartNumber, _nextExpectedPartNumber, _partDataSources.Count);
+
+                    // Continue reading to fill the buffer across part boundaries.
+                    // This matches standard Stream.Read() behavior where we attempt to
+                    // fill the caller's buffer completely before returning, even if it
+                    // requires reading from multiple parts.
+                    // Example: Caller requests 20MB, part 1 has 5MB remaining. We return
+                    // (5MB, shouldContinue=true), then on the next iteration read from part 2
+                    // to try to fill the remaining 15MB.
+                    return (partBytesRead, true);
+                }
+
+                // If the part is not complete but we got 0 bytes, it's EOF
+                if (partBytesRead == 0)
+                {
+                    Logger.DebugFormat("PartBufferManager.ReadFromCurrentPart: Part {0} returned 0 bytes (EOF)", currentPartNumber);
+                    return (0, false);
+                }
+
+                Logger.DebugFormat("PartBufferManager.ReadFromCurrentPart: Part {0} has more data. Returning {1} bytes (will resume on next call)",
+                    currentPartNumber, partBytesRead);
+
+                // Part still has more data available. Return what we read.
+                // We'll resume from this part on the next ReadAsync call.
+                return (partBytesRead, false);
+            }
+            catch (Exception ex)
+            {
+                Logger.Error(ex, "PartBufferManager.ReadFromCurrentPart: Error reading from part {0}: {1}",
+                    currentPartNumber, ex.Message);
+
+                // Clean up on failure to prevent resource leaks
+                dataSource?.Dispose();
+                ReleaseBufferSpace();
+                throw;
+            }
+        }
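+
+        // Illustrative coordinator-side completion handling (hypothetical code; the actual
+        // download coordinator lives outside this class):
+        //
+        //     try
+        //     {
+        //         await Task.WhenAll(downloadTasks);
+        //         bufferManager.MarkDownloadComplete(null);   // success: consumer observes EOF
+        //     }
+        //     catch (Exception ex)
+        //     {
+        //         bufferManager.MarkDownloadComplete(ex);     // failure: consumer rethrows
+        //     }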
+        /// <inheritdoc/>
+        ///
+        /// Called by the consumer after fully reading and disposing a buffered part.
+        /// This method releases a slot in the _bufferSpaceAvailable semaphore, which may
+        /// unblock a download task waiting in WaitForBufferSpaceAsync.
+        ///
+        /// FLOW CONTROL CYCLE:
+        /// 1. Download task blocks in WaitForBufferSpaceAsync (slot count = 0)
+        /// 2. Consumer reads and completes a part
+        /// 3. Consumer calls this method (slot count = 1)
+        /// 4. Download task unblocks and can buffer the next part
+        ///
+        /// Example: With MaxInMemoryParts=10, after consuming part 1, this allows part 11 to be buffered.
+        ///
+        public void ReleaseBufferSpace()
+        {
+            ThrowIfDisposed();
+
+            // Release buffer space when a consumer finishes with a part
+            _bufferSpaceAvailable.Release();
+
+            var availableAfter = _bufferSpaceAvailable.CurrentCount;
+            Logger.DebugFormat("PartBufferManager: Buffer space released (Available slots after release: {0})", availableAfter);
+        }
+
+        /// <inheritdoc/>
+        ///
+        /// Called by the download coordinator when all download tasks have finished.
+        /// This signals to the consumer that no more parts will arrive, allowing it to
+        /// detect end-of-file correctly even if waiting for a part that will never come.
+        ///
+        /// SYNCHRONIZATION: Simple assignment is safe because:
+        /// 1. Reference assignments are atomic in .NET
+        /// 2. The volatile keyword ensures the new Tuple is immediately visible to all threads
+        /// 3. No lock needed - atomicity comes from the single reference write
+        ///
+        /// Example: All 5 parts downloaded successfully
+        /// - Download coordinator calls MarkDownloadComplete(null)
+        /// - Creates a new Tuple(true, null) and assigns it atomically
+        /// - Consumer waiting for non-existent part 6 wakes up
+        /// - Consumer reads _completionState atomically, sees Item1=true, Item2=null
+        /// - Consumer returns EOF (0 bytes)
+        ///
+        public void MarkDownloadComplete(Exception exception)
+        {
+            if (exception != null)
+            {
+                Logger.Error(exception, "PartBufferManager: Download marked complete with error. Signaling completion.");
+            }
+            else
+            {
+                Logger.DebugFormat("PartBufferManager: Download marked complete successfully. Signaling completion.");
+            }
+
+            // Create and assign the new completion state atomically
+            // No lock needed: reference assignment is atomic, volatile ensures visibility
+            _completionState = Tuple.Create(true, exception);
+
+            // Signal that completion status has changed.
+            // This wakes any consumer waiting in ReadFromCurrentPartAsync to check completion.
+ _partAvailable.Set(); + } + + private void ThrowIfDisposed() + { + if (_disposed) + throw new ObjectDisposedException(nameof(PartBufferManager)); + } + + #region Dispose Pattern + + /// + [SuppressMessage("Design", "CA1031:Do not catch general exception types", Justification = "Dispose methods should not throw exceptions")] + public void Dispose() + { + if (!_disposed) + { + try + { + // Clean up all data sources (both buffered and streaming) + foreach (var dataSource in _partDataSources.Values) + { + dataSource?.Dispose(); + } + _partDataSources.Clear(); + + // Clean up synchronization primitives + _bufferSpaceAvailable?.Dispose(); + _partAvailable?.Dispose(); + } + catch (Exception) + { + // Suppressing CA1031: Dispose methods should not throw exceptions + // Continue disposal process silently on any errors + } + + _disposed = true; + } + } + + #endregion + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/RequestMapper.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/RequestMapper.cs new file mode 100644 index 000000000000..fb22d6a9ff55 --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/RequestMapper.cs @@ -0,0 +1,92 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ + +using System; +using Amazon.S3.Model; + +namespace Amazon.S3.Transfer.Internal +{ + /// + /// Utility class for mapping Transfer Utility request objects to S3 request objects. + /// Centralizes request creation logic to ensure consistency across different commands. + /// + internal static class RequestMapper + { + /// + /// Maps a BaseDownloadRequest to GetObjectRequest. + /// Includes comprehensive property mappings for all supported download scenarios. 
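+        /// Illustrative use (hypothetical caller code, assuming a populated download request):
+        ///     GetObjectRequest getRequest = RequestMapper.MapToGetObjectRequest(downloadRequest);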
+ /// + /// The BaseDownloadRequest to map from + /// A new GetObjectRequest with mapped fields + /// Thrown when request is null + internal static GetObjectRequest MapToGetObjectRequest(BaseDownloadRequest request) + { + if (request == null) + throw new ArgumentNullException(nameof(request)); + + var getRequest = new GetObjectRequest + { + BucketName = request.BucketName, + Key = request.Key, + VersionId = request.VersionId + }; + + // Map date conditions + if (request.IsSetModifiedSinceDate()) + { + getRequest.ModifiedSinceDate = request.ModifiedSinceDate; + } + if (request.IsSetUnmodifiedSinceDate()) + { + getRequest.UnmodifiedSinceDate = request.UnmodifiedSinceDate; + } + + // Map server-side encryption properties + getRequest.ServerSideEncryptionCustomerMethod = request.ServerSideEncryptionCustomerMethod; + getRequest.ServerSideEncryptionCustomerProvidedKey = request.ServerSideEncryptionCustomerProvidedKey; + getRequest.ServerSideEncryptionCustomerProvidedKeyMD5 = request.ServerSideEncryptionCustomerProvidedKeyMD5; + + // Map additional properties + getRequest.ChecksumMode = request.ChecksumMode; + getRequest.RequestPayer = request.RequestPayer; + + // Map ownership and ETag matching properties + if (request.IsSetExpectedBucketOwner()) + { + getRequest.ExpectedBucketOwner = request.ExpectedBucketOwner; + } + if (request.IsSetIfMatch()) + { + getRequest.EtagToMatch = request.IfMatch; + } + if (request.IsSetIfNoneMatch()) + { + getRequest.EtagToNotMatch = request.IfNoneMatch; + } + + // Map response header overrides + getRequest.ResponseHeaderOverrides = request.ResponseHeaderOverrides; + + return getRequest; + } + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/ResponseMapper.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/ResponseMapper.cs new file mode 100644 index 000000000000..2ba493556c35 --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/ResponseMapper.cs @@ -0,0 +1,194 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ + +using System; +using System.Collections.Generic; +using Amazon.S3.Model; + +namespace Amazon.S3.Transfer.Internal +{ + /// + /// Utility class for mapping S3 response objects to TransferUtilityUploadResponse. + /// Maps fields based on the mapping.json configuration used by the Transfer Utility. + /// + internal static class ResponseMapper + { + /// + /// Maps a PutObjectResponse to TransferUtilityUploadResponse. + /// Uses the field mappings defined in mapping.json "Conversion" -> "PutObjectResponse" -> "UploadResponse". 
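+        /// Illustrative use (hypothetical caller, after a successful PutObject call):
+        ///     TransferUtilityUploadResponse uploadResponse = ResponseMapper.MapPutObjectResponse(putObjectResponse);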
+ /// + /// The PutObjectResponse to map from + /// A new TransferUtilityUploadResponse with mapped fields + /// Thrown when source is null + internal static TransferUtilityUploadResponse MapPutObjectResponse(PutObjectResponse source) + { + if (source == null) + throw new ArgumentNullException(nameof(source)); + + var response = new TransferUtilityUploadResponse(); + + // Map all fields as defined in mapping.json "Conversion" -> "PutObjectResponse" -> "UploadResponse" + response.BucketKeyEnabled = source.BucketKeyEnabled.GetValueOrDefault(); + response.ChecksumCRC32 = source.ChecksumCRC32; + response.ChecksumCRC32C = source.ChecksumCRC32C; + response.ChecksumCRC64NVME = source.ChecksumCRC64NVME; + response.ChecksumSHA1 = source.ChecksumSHA1; + response.ChecksumSHA256 = source.ChecksumSHA256; + response.ChecksumType = source.ChecksumType; + response.ETag = source.ETag; + response.Expiration = source.Expiration; + response.RequestCharged = source.RequestCharged; + response.ServerSideEncryptionCustomerMethod = source.ServerSideEncryptionCustomerMethod; + response.ServerSideEncryptionCustomerProvidedKeyMD5 = source.ServerSideEncryptionCustomerProvidedKeyMD5; + response.ServerSideEncryptionKeyManagementServiceEncryptionContext = source.ServerSideEncryptionKeyManagementServiceEncryptionContext; + response.ServerSideEncryptionKeyManagementServiceKeyId = source.ServerSideEncryptionKeyManagementServiceKeyId; + response.ServerSideEncryptionMethod = source.ServerSideEncryptionMethod; + response.VersionId = source.VersionId; + response.Size = source.Size; + + return response; + } + + /// + /// Maps a CompleteMultipartUploadResponse to TransferUtilityUploadResponse. + /// Uses the field mappings defined in mapping.json "Conversion" -> "CompleteMultipartResponse" -> "UploadResponse". + /// + /// The CompleteMultipartUploadResponse to map from + /// A new TransferUtilityUploadResponse with mapped fields + /// Thrown when source is null + internal static TransferUtilityUploadResponse MapCompleteMultipartUploadResponse(CompleteMultipartUploadResponse source) + { + if (source == null) + throw new ArgumentNullException(nameof(source)); + + var response = new TransferUtilityUploadResponse(); + + // Map all fields as defined in mapping.json "Conversion" -> "CompleteMultipartResponse" -> "UploadResponse" + response.BucketKeyEnabled = source.BucketKeyEnabled.GetValueOrDefault(); + response.ChecksumCRC32 = source.ChecksumCRC32; + response.ChecksumCRC32C = source.ChecksumCRC32C; + response.ChecksumCRC64NVME = source.ChecksumCRC64NVME; + response.ChecksumSHA1 = source.ChecksumSHA1; + response.ChecksumSHA256 = source.ChecksumSHA256; + response.ChecksumType = source.ChecksumType; + response.ETag = source.ETag; + response.Expiration = source.Expiration; + response.RequestCharged = source.RequestCharged; + response.ServerSideEncryptionMethod = source.ServerSideEncryptionMethod; + response.ServerSideEncryptionKeyManagementServiceKeyId = source.ServerSideEncryptionKeyManagementServiceKeyId; + response.VersionId = source.VersionId; + response.BucketName = source.BucketName; + response.Key = source.Key; + response.Location = source.Location; + + return response; + } + + /// + /// Private helper method to populate the common properties from GetObjectResponse to the base response class. + /// Contains all the shared mapping logic for GetObjectResponse fields. 
+ /// + /// The GetObjectResponse to map from + /// The TransferUtilityGetObjectResponseBase to populate + /// Thrown when source or target is null + private static void PopulateGetObjectResponseBase(GetObjectResponse source, TransferUtilityGetObjectResponseBase target) + { + if (source == null) + throw new ArgumentNullException(nameof(source)); + if (target == null) + throw new ArgumentNullException(nameof(target)); + + // Map all fields as defined in mapping.json "Conversion" -> "GetObjectResponse" -> "DownloadResponse" + target.AcceptRanges = source.AcceptRanges; + target.BucketKeyEnabled = source.BucketKeyEnabled.GetValueOrDefault(); + target.ChecksumCRC32 = source.ChecksumCRC32; + target.ChecksumCRC32C = source.ChecksumCRC32C; + target.ChecksumCRC64NVME = source.ChecksumCRC64NVME; + target.ChecksumSHA1 = source.ChecksumSHA1; + target.ChecksumSHA256 = source.ChecksumSHA256; + target.ChecksumType = source.ChecksumType; + target.ContentRange = source.ContentRange; + target.Headers = source.Headers; + target.DeleteMarker = source.DeleteMarker; + target.ETag = source.ETag; + target.Expiration = source.Expiration; + target.ExpiresString = source.ExpiresString; + target.LastModified = source.LastModified; + target.Metadata = source.Metadata; + target.MissingMeta = source.MissingMeta; + target.ObjectLockLegalHoldStatus = source.ObjectLockLegalHoldStatus; + target.ObjectLockMode = source.ObjectLockMode; + target.ObjectLockRetainUntilDate = source.ObjectLockRetainUntilDate; + target.PartsCount = source.PartsCount; + target.ReplicationStatus = source.ReplicationStatus; + target.RequestCharged = source.RequestCharged; + target.RestoreExpiration = source.RestoreExpiration; + target.RestoreInProgress = source.RestoreInProgress; + target.ServerSideEncryptionCustomerMethod = source.ServerSideEncryptionCustomerMethod; + target.ServerSideEncryptionCustomerProvidedKeyMD5 = source.ServerSideEncryptionCustomerProvidedKeyMD5; + target.ServerSideEncryptionKeyManagementServiceKeyId = source.ServerSideEncryptionKeyManagementServiceKeyId; + target.ServerSideEncryptionMethod = source.ServerSideEncryptionMethod; + target.StorageClass = source.StorageClass; + target.TagCount = source.TagCount; + target.VersionId = source.VersionId; + target.WebsiteRedirectLocation = source.WebsiteRedirectLocation; + } + + /// + /// Maps a GetObjectResponse to TransferUtilityDownloadResponse. + /// Uses the field mappings defined in mapping.json "Conversion" -> "GetObjectResponse" -> "DownloadResponse". + /// + /// The GetObjectResponse to map from + /// A new TransferUtilityDownloadResponse with mapped fields + /// Thrown when source is null + internal static TransferUtilityDownloadResponse MapGetObjectResponse(GetObjectResponse source) + { + if (source == null) + throw new ArgumentNullException(nameof(source)); + + var response = new TransferUtilityDownloadResponse(); + PopulateGetObjectResponseBase(source, response); + return response; + } + + /// + /// Maps a GetObjectResponse to TransferUtilityOpenStreamResponse. + /// Uses the same field mappings as DownloadResponse plus the ResponseStream property. 
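+        /// Illustrative use (hypothetical caller, assuming a GetObjectResponse whose stream will be handed to the reader):
+        ///     TransferUtilityOpenStreamResponse openStreamResponse = ResponseMapper.MapGetObjectResponseToOpenStream(getObjectResponse);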
+ /// + /// The GetObjectResponse to map from + /// A new TransferUtilityOpenStreamResponse with mapped fields + /// Thrown when source is null + internal static TransferUtilityOpenStreamResponse MapGetObjectResponseToOpenStream(GetObjectResponse source) + { + if (source == null) + throw new ArgumentNullException(nameof(source)); + + var response = new TransferUtilityOpenStreamResponse(); + PopulateGetObjectResponseBase(source, response); + response.ResponseStream = source.ResponseStream; + + return response; + } + + + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/SimpleUploadCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/SimpleUploadCommand.cs index aa247f84b9da..532061888d22 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/SimpleUploadCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/SimpleUploadCommand.cs @@ -37,17 +37,23 @@ namespace Amazon.S3.Transfer.Internal /// /// This command is for doing regular PutObject requests. /// - internal partial class SimpleUploadCommand : BaseCommand + internal partial class SimpleUploadCommand : BaseCommand { IAmazonS3 _s3Client; TransferUtilityConfig _config; TransferUtilityUploadRequest _fileTransporterRequest; + long _totalTransferredBytes; + private readonly long _contentLength; internal SimpleUploadCommand(IAmazonS3 s3Client, TransferUtilityConfig config, TransferUtilityUploadRequest fileTransporterRequest) { this._s3Client = s3Client; this._config = config; this._fileTransporterRequest = fileTransporterRequest; + + // Cache content length immediately while stream is accessible to avoid ObjectDisposedException in failure scenarios + this._contentLength = this._fileTransporterRequest.ContentLength; + var fileName = fileTransporterRequest.FilePath; } @@ -127,9 +133,48 @@ internal PutObjectRequest ConstructRequest() private void PutObjectProgressEventCallback(object sender, UploadProgressArgs e) { - var progressArgs = new UploadProgressArgs(e.IncrementTransferred, e.TransferredBytes, e.TotalBytes, - e.CompensationForRetry, _fileTransporterRequest.FilePath); + // Keep track of the total transferred bytes so that we can also return this value in case of failure + long transferredBytes = Interlocked.Add(ref _totalTransferredBytes, e.IncrementTransferred - e.CompensationForRetry); + + var progressArgs = new UploadProgressArgs(e.IncrementTransferred, transferredBytes, _contentLength, + e.CompensationForRetry, _fileTransporterRequest.FilePath, _fileTransporterRequest); this._fileTransporterRequest.OnRaiseProgressEvent(progressArgs); } + + private void FireTransferInitiatedEvent() + { + var initiatedArgs = new UploadInitiatedEventArgs( + request: _fileTransporterRequest, + filePath: _fileTransporterRequest.FilePath, + totalBytes: _contentLength + ); + + _fileTransporterRequest.OnRaiseTransferInitiatedEvent(initiatedArgs); + } + + private void FireTransferCompletedEvent(TransferUtilityUploadResponse response) + { + var completedArgs = new UploadCompletedEventArgs( + request: _fileTransporterRequest, + response: response, + filePath: _fileTransporterRequest.FilePath, + transferredBytes: Interlocked.Read(ref _totalTransferredBytes), + totalBytes: _contentLength + ); + + _fileTransporterRequest.OnRaiseTransferCompletedEvent(completedArgs); + } + + private void FireTransferFailedEvent() + { + var failedArgs = new UploadFailedEventArgs( + request: _fileTransporterRequest, + filePath: _fileTransporterRequest.FilePath, + transferredBytes: Interlocked.Read(ref _totalTransferredBytes), + totalBytes: _contentLength + 
); + + _fileTransporterRequest.OnRaiseTransferFailedEvent(failedArgs); + } } } diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/StreamPartBuffer.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/StreamPartBuffer.cs new file mode 100644 index 000000000000..a850a9f9ad38 --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/StreamPartBuffer.cs @@ -0,0 +1,175 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ +using System; +using System.Buffers; +using System.Diagnostics.CodeAnalysis; +using Amazon.S3.Model; + +namespace Amazon.S3.Transfer.Internal +{ + /// + /// Container for downloaded part data optimized for streaming scenarios. + /// Uses ArrayPool buffers and tracks reading position for sequential access + /// by BufferedMultipartStream. + /// + internal class StreamPartBuffer : IDisposable + { + private bool _disposed = false; + + /// + /// Gets or sets the part number for priority queue ordering. + /// For Part GET strategy: Uses the actual part number from the multipart upload. + /// For Range GET strategy: Calculated based on byte range position. + /// + public int PartNumber { get; set; } + + /// + /// Gets or sets the ArrayPool buffer containing the downloaded part data. + /// Ownership belongs to this StreamPartBuffer and will be returned to pool on disposal. + /// + public byte[] ArrayPoolBuffer { get; set; } + + /// + /// Gets or sets the current reading position within the buffer. + /// Used by BufferedMultipartStream for sequential reading. + /// + public int CurrentPosition { get; set; } = 0; + + /// + /// Gets the number of bytes remaining to be read from current position. + /// + public int RemainingBytes => Math.Max(0, Length - CurrentPosition); + + /// + /// Gets or sets the length of valid data in the ArrayPool buffer. + /// The buffer may be larger than this due to ArrayPool size rounding. + /// + public int Length { get; set; } + + /// + /// Creates a new StreamPartBuffer for streaming scenarios. + /// For internal use only - external callers should use Create() factory method. + /// + /// The part number for ordering + /// The ArrayPool buffer containing the data (ownership transferred) + /// The length of valid data in the buffer + internal StreamPartBuffer(int partNumber, byte[] arrayPoolBuffer, int length) + { + PartNumber = partNumber; + ArrayPoolBuffer = arrayPoolBuffer; + Length = length; + CurrentPosition = 0; + } + + /// + /// Creates a new StreamPartBuffer with a rented ArrayPool buffer. + /// The StreamPartBuffer takes ownership and will return the buffer on disposal. 
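+        /// Illustrative use (hypothetical producer code; the part size value is an example only):
+        ///     using (var part = StreamPartBuffer.Create(partNumber: 3, capacity: 8 * 1024 * 1024))
+        ///     {
+        ///         // fill part.ArrayPoolBuffer from the response stream, then call part.SetLength(bytesWritten)
+        ///     }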
+        /// The part number for ordering
+        /// Initial capacity needed for the buffer
+        /// A StreamPartBuffer with rented buffer ready for writing
+        public static StreamPartBuffer Create(int partNumber, int capacity)
+        {
+            var buffer = ArrayPool<byte>.Shared.Rent(capacity);
+            return new StreamPartBuffer(partNumber, buffer, 0); // Length will be set after writing
+        }
+
+        /// Sets the length of valid data in the buffer after writing.
+        /// Can only be set once to a non-zero value, preventing state corruption.
+        /// length: The number of valid bytes written to the buffer
+        /// InvalidOperationException: Thrown if the length has already been set
+        /// ArgumentOutOfRangeException: Thrown if the length is negative or exceeds the buffer capacity
+        internal void SetLength(int length)
+        {
+            if (Length > 0)
+                throw new InvalidOperationException("Length has already been set and cannot be changed");
+
+            if (length < 0)
+                throw new ArgumentOutOfRangeException(nameof(length), "Length must be non-negative");
+
+            if (ArrayPoolBuffer != null && length > ArrayPoolBuffer.Length)
+                throw new ArgumentOutOfRangeException(nameof(length), "Length exceeds buffer capacity");
+
+            Length = length;
+        }
+
+        /// Returns a string representation of this StreamPartBuffer for debugging.
+        /// A string describing this stream part buffer
+        public override string ToString()
+        {
+            return $"StreamPartBuffer(Part={PartNumber}, ArrayPool={Length} bytes, pos={CurrentPosition}, remaining={RemainingBytes})";
+        }
+
+        #region IDisposable Implementation
+
+        /// Releases all resources used by this StreamPartBuffer.
+        public void Dispose()
+        {
+            Dispose(true);
+            GC.SuppressFinalize(this);
+        }
+
+        /// Releases the unmanaged resources used by the StreamPartBuffer and optionally releases the managed resources.
+        /// Returns the ArrayPool buffer back to the shared pool.
+        /// disposing: True to release both managed and unmanaged resources; false to release only unmanaged resources.
+        [SuppressMessage("Design", "CA1031:Do not catch general exception types", Justification = "Dispose methods should not throw exceptions")]
+        protected virtual void Dispose(bool disposing)
+        {
+            if (!_disposed && disposing)
+            {
+                try
+                {
+                    // Return the ArrayPool buffer to the shared pool
+                    if (ArrayPoolBuffer != null)
+                    {
+                        ArrayPool<byte>.Shared.Return(ArrayPoolBuffer);
+                        ArrayPoolBuffer = null;
+                    }
+                }
+                catch (Exception)
+                {
+                    // Suppressing CA1031: Dispose methods should not throw exceptions
+                }
+
+                _disposed = true;
+            }
+        }
+
+        /// Finalizer to ensure resources are cleaned up if Dispose is not called.
+        ~StreamPartBuffer()
+        {
+            Dispose(false);
+        }
+
+        #endregion
+    }
+}
diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/StreamingDataSource.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/StreamingDataSource.cs
new file mode 100644
index 000000000000..d203f27d0c61
--- /dev/null
+++ b/sdk/src/Services/S3/Custom/Transfer/Internal/StreamingDataSource.cs
@@ -0,0 +1,230 @@
+/*******************************************************************************
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file.
+ * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ +using System; +using System.Diagnostics.CodeAnalysis; +using System.IO; +using System.Threading; +using System.Threading.Tasks; +using Amazon.Runtime.Internal.Util; +using Amazon.S3.Model; + +namespace Amazon.S3.Transfer.Internal +{ + /// + /// Stream-based data source that reads directly from GetObjectResponse without buffering. + /// Provides pass-through access to the response stream for optimal memory efficiency when parts arrive in order. + /// + /// + /// This class enables direct streaming optimization for multipart downloads. When a part arrives + /// and happens to be the next expected part in the sequence, we can bypass buffering entirely + /// and stream the response directly to the consumer. + /// + /// OWNERSHIP AND LIFECYCLE: + /// - Takes ownership of the GetObjectResponse and its stream + /// - Responsible for disposing the response (releases HTTP connection) + /// - Consumer reads directly from response stream via ReadAsync + /// - Must be disposed to release network resources + /// + /// THREAD SAFETY: + /// - Designed for single-threaded consumption by PartBufferManager + /// - PartBufferManager guarantees sequential access to each part + /// - No internal synchronization needed + /// + /// COMPLETION TRACKING: + /// - Tracks bytes read vs ContentLength to detect completion + /// - Sets IsComplete when stream exhausted OR expected bytes reached + /// - Handles both normal completion and premature stream closure + /// + internal class StreamingDataSource : IPartDataSource + { + private readonly GetObjectResponse _response; + private readonly Stream _responseStream; + private readonly long _expectedBytes; + private readonly int _partNumber; + private long _bytesRead; + private bool _isComplete; + private bool _disposed; + + #region Logger + + private Logger Logger + { + get + { + return Logger.GetLogger(typeof(TransferUtility)); + } + } + + #endregion + + /// + public int PartNumber + { + get + { + ThrowIfDisposed(); + return _partNumber; + } + } + + /// + public bool IsComplete + { + get + { + ThrowIfDisposed(); + return _isComplete; + } + } + + /// + /// Initializes a new instance of the class. + /// Takes ownership of the GetObjectResponse and its stream. + /// + /// The 1-based part number this source represents. + /// The GetObjectResponse containing the stream to read from. Ownership is transferred. + /// Thrown when is null. + /// + /// CRITICAL: This constructor takes ownership of the response. The caller must NOT dispose it. + /// The StreamingDataSource will dispose the response when it is disposed. + /// + public StreamingDataSource(int partNumber, GetObjectResponse response) + { + if (response == null) + throw new ArgumentNullException(nameof(response)); + + _partNumber = partNumber; + _response = response; + _responseStream = response.ResponseStream; + _expectedBytes = response.ContentLength; + _bytesRead = 0; + _isComplete = false; + + Logger.DebugFormat("StreamingDataSource: Created for part {0} (ExpectedBytes={1}, streaming directly from response)", + _partNumber, _expectedBytes); + } + + /// + /// + /// Reads directly from the underlying response stream without any buffering or copying. + /// This provides optimal memory efficiency and minimal latency for in-order part arrivals. 
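+        /// Illustrative decision point (hypothetical coordinator code): when a part response arrives
+        /// and partNumber == bufferManager.NextExpectedPartNumber, wrap the response instead of buffering:
+        ///     bufferManager.AddDataSource(new StreamingDataSource(partNumber, getObjectResponse));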
+ /// + /// COMPLETION DETECTION: + /// The source is marked complete when: + /// 1. Stream returns 0 bytes (normal EOF), OR + /// 2. We've read the expected number of bytes (ContentLength) + /// + /// ERROR HANDLING: + /// Any exceptions from the underlying stream (network errors, timeout, etc.) propagate directly + /// to the caller. The PartBufferManager will handle cleanup and error recovery. + /// + public async Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) + { + ThrowIfDisposed(); + + if (buffer == null) + throw new ArgumentNullException(nameof(buffer)); + if (offset < 0) + throw new ArgumentOutOfRangeException(nameof(offset), "Offset must be non-negative"); + if (count < 0) + throw new ArgumentOutOfRangeException(nameof(count), "Count must be non-negative"); + if (offset + count > buffer.Length) + throw new ArgumentException("Offset and count exceed buffer bounds"); + + if (_isComplete) + { + Logger.DebugFormat("StreamingDataSource: [Part {0}] Already complete, returning 0 bytes", PartNumber); + return 0; + } + + try + { + Logger.DebugFormat("StreamingDataSource: [Part {0}] Reading up to {1} bytes from response stream (BytesRead={2}/{3})", + PartNumber, count, _bytesRead, _expectedBytes); + + // Direct delegation to response stream - no buffering, just pass-through + var bytesRead = await _responseStream.ReadAsync(buffer, offset, count, cancellationToken).ConfigureAwait(false); + + _bytesRead += bytesRead; + + Logger.DebugFormat("StreamingDataSource: [Part {0}] Read {1} bytes from response stream (TotalBytesRead={2}/{3})", + PartNumber, bytesRead, _bytesRead, _expectedBytes); + + // Mark complete when stream exhausted OR we've read expected bytes + if (bytesRead == 0 || _bytesRead >= _expectedBytes) + { + _isComplete = true; + Logger.DebugFormat("StreamingDataSource: [Part {0}] Marked complete (BytesRead=0: {1}, ReachedExpected: {2})", + PartNumber, bytesRead == 0, _bytesRead >= _expectedBytes); + } + + return bytesRead; + } + catch (Exception ex) + { + Logger.Error(ex, "StreamingDataSource: [Part {0}] Error reading from response stream: {1}", + PartNumber, ex.Message); + + // Mark as complete on error to prevent further read attempts + _isComplete = true; + throw; + } + } + + private void ThrowIfDisposed() + { + if (_disposed) + throw new ObjectDisposedException(nameof(StreamingDataSource)); + } + + /// + /// + /// CRITICAL: Disposes the GetObjectResponse, which releases the HTTP connection back to the connection pool. + /// Failure to dispose will cause connection leaks and eventual connection pool exhaustion. 
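+        /// Illustrative cleanup order (mirroring PartBufferManager's handling of a completed part):
+        ///     dataSource.Dispose();        // releases the HTTP connection back to the pool
+        ///     ReleaseBufferSpace();        // then frees the flow-control slot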
+ /// + [SuppressMessage("Design", "CA1031:Do not catch general exception types", Justification = "Dispose methods should not throw exceptions")] + public void Dispose() + { + if (!_disposed) + { + try + { + Logger.DebugFormat("StreamingDataSource: [Part {0}] Disposing (Releasing HTTP connection, BytesRead={1}/{2})", + PartNumber, _bytesRead, _expectedBytes); + + // Dispose the response - this releases the HTTP connection + _response?.Dispose(); + } + catch (Exception ex) + { + Logger.Error(ex, "StreamingDataSource: [Part {0}] Error during disposal: {1}", + PartNumber, ex.Message); + + // Suppressing CA1031: Dispose methods should not throw exceptions + // Continue disposal process silently on any errors + } + + _disposed = true; + } + } + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/TaskHelpers.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/TaskHelpers.cs new file mode 100644 index 000000000000..acc3dce55b6f --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/TaskHelpers.cs @@ -0,0 +1,226 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +using System; +using System.Collections.Generic; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; +using Amazon.Runtime.Internal.Util; + +namespace Amazon.S3.Transfer.Internal +{ + /// + /// Provides helper methods for Task-based operations. + /// + internal static class TaskHelpers + { + /// + /// Waits for all tasks to complete, failing fast on the first exception. + /// When any task faults, its exception is immediately propagated without waiting for other tasks. + /// + /// List of tasks to wait for completion. This list is not modified. + /// Cancellation token to observe (not actively checked - caller handles cancellation) + /// A task that represents the completion of all tasks or throws on first exception + /// + /// This method creates an internal copy of the task list for tracking purposes, + /// so the caller's list remains unchanged after this method completes. + /// The caller is responsible for cancelling remaining tasks when this method throws. 
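+        /// Illustrative use (hypothetical caller; internalCts is assumed to be a CancellationTokenSource
+        /// shared by the sibling download tasks):
+        ///     try { await TaskHelpers.WhenAllFailFastAsync(pendingTasks, internalCts.Token); }
+        ///     catch { internalCts.Cancel(); throw; }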
+ /// + internal static async Task WhenAllFailFastAsync(List pendingTasks, CancellationToken cancellationToken) + { + var remaining = new HashSet(pendingTasks); + int total = remaining.Count; + int processed = 0; + + Logger.GetLogger(typeof(TaskHelpers)).DebugFormat("TaskHelpers.WhenAllFailFastAsync: Starting with TotalTasks={0}", total); + + while (remaining.Count > 0) + { + // Wait for any task to complete + var completedTask = await Task.WhenAny(remaining) + .ConfigureAwait(continueOnCapturedContext: false); + + // Process the completed task - will throw if faulted + // The caller's catch block handles cancellation AFTER this exception propagates, + // which ensures the original exception is always thrown (not OperationCanceledException) + await completedTask + .ConfigureAwait(continueOnCapturedContext: false); + + remaining.Remove(completedTask); + processed++; + + Logger.GetLogger(typeof(TaskHelpers)).DebugFormat("TaskHelpers.WhenAllFailFastAsync: Task completed (Processed={0}/{1}, Remaining={2})", + processed, total, remaining.Count); + } + + Logger.GetLogger(typeof(TaskHelpers)).DebugFormat("TaskHelpers.WhenAllFailFastAsync: All tasks completed (Total={0})", total); + } + + /// + /// Waits for all tasks to complete or till any task fails or is canceled. + /// + /// List of tasks to wait for completion. Note: This list is mutated during processing. + /// Cancellation token to observe + /// A task that represents the completion of all tasks or the first exception + internal static async Task WhenAllOrFirstExceptionAsync(List pendingTasks, CancellationToken cancellationToken) + { + int processed = 0; + int total = pendingTasks.Count; + + Logger.GetLogger(typeof(TaskHelpers)).DebugFormat("TaskHelpers.WhenAllOrFirstExceptionAsync: Starting with TotalTasks={0}", total); + + while (processed < total) + { + cancellationToken.ThrowIfCancellationRequested(); + + var completedTask = await Task.WhenAny(pendingTasks) + .ConfigureAwait(continueOnCapturedContext: false); + + // If RanToCompletion a response will be returned + // If Faulted or Canceled an appropriate exception will be thrown + await completedTask + .ConfigureAwait(continueOnCapturedContext: false); + + pendingTasks.Remove(completedTask); + processed++; + + Logger.GetLogger(typeof(TaskHelpers)).DebugFormat("TaskHelpers.WhenAllOrFirstExceptionAsync: Task completed (Processed={0}/{1}, Remaining={2})", + processed, total, pendingTasks.Count); + } + + Logger.GetLogger(typeof(TaskHelpers)).DebugFormat("TaskHelpers.WhenAllOrFirstExceptionAsync: All tasks completed (Total={0})", total); + } + + /// + /// Waits for all tasks to complete or till any task fails or is canceled. + /// Returns results from all completed tasks. 
+        ///
+        /// TResult: The type of result returned by the tasks
+        /// pendingTasks: List of tasks to wait for completion
+        /// cancellationToken: Cancellation token to observe
+        /// Returns a task that represents the completion of all tasks with their results, or the first exception
+        internal static async Task<List<TResult>> WhenAllOrFirstExceptionAsync<TResult>(List<Task<TResult>> pendingTasks, CancellationToken cancellationToken)
+        {
+            int processed = 0;
+            int total = pendingTasks.Count;
+            var responses = new List<TResult>();
+
+            Logger.GetLogger(typeof(TaskHelpers)).DebugFormat("TaskHelpers.WhenAllOrFirstExceptionAsync: Starting with TotalTasks={0}", total);
+
+            while (processed < total)
+            {
+                cancellationToken.ThrowIfCancellationRequested();
+
+                var completedTask = await Task.WhenAny(pendingTasks)
+                    .ConfigureAwait(continueOnCapturedContext: false);
+
+                //If RanToCompletion a response will be returned
+                //If Faulted or Canceled an appropriate exception will be thrown
+                var response = await completedTask
+                    .ConfigureAwait(continueOnCapturedContext: false);
+                responses.Add(response);
+
+                pendingTasks.Remove(completedTask);
+                processed++;
+
+                Logger.GetLogger(typeof(TaskHelpers)).DebugFormat("TaskHelpers.WhenAllOrFirstExceptionAsync: Task completed (Processed={0}/{1}, Remaining={2})",
+                    processed, total, pendingTasks.Count);
+            }
+
+            Logger.GetLogger(typeof(TaskHelpers)).DebugFormat("TaskHelpers.WhenAllOrFirstExceptionAsync: All tasks completed (Total={0})", total);
+
+            return responses;
+        }
+
+        /// Executes work items with limited concurrency using a task pool pattern.
+        /// Creates only as many tasks as the concurrency limit allows, rather than creating
+        /// all tasks upfront. This reduces memory overhead for large collections.
+        ///
+        /// This method provides a clean way to limit concurrent operations without creating
+        /// all tasks upfront. It maintains a pool of active tasks up to the maxConcurrency limit,
+        /// replacing completed tasks with new ones until all items are processed.
+        /// The caller is responsible for implementing failure handling within the processAsync function.
+        ///
+        /// T: The type of items to process
+        /// items: The collection of items to process
+        /// maxConcurrency: Maximum number of concurrent tasks
+        /// processAsync: Async function to process each item
+        /// cancellationToken: Cancellation token to observe
+        /// Returns a task that completes when all items are processed, or throws on first failure
+        internal static async Task ForEachWithConcurrencyAsync<T>(
+            IEnumerable<T> items,
+            int maxConcurrency,
+            Func<T, CancellationToken, Task> processAsync,
+            CancellationToken cancellationToken)
+        {
+            var itemList = items as IList<T> ??
items.ToList(); + if (itemList.Count == 0) + { + Logger.GetLogger(typeof(TaskHelpers)).DebugFormat("TaskHelpers.ForEachWithConcurrencyAsync: No items to process"); + return; + } + + Logger.GetLogger(typeof(TaskHelpers)).DebugFormat("TaskHelpers.ForEachWithConcurrencyAsync: Starting with TotalItems={0}, MaxConcurrency={1}", + itemList.Count, maxConcurrency); + + int nextIndex = 0; + var activeTasks = new List(); + + // Start initial batch up to concurrency limit + int initialBatchSize = Math.Min(maxConcurrency, itemList.Count); + Logger.GetLogger(typeof(TaskHelpers)).DebugFormat("TaskHelpers.ForEachWithConcurrencyAsync: Starting initial batch of {0} tasks", initialBatchSize); + + for (int i = 0; i < initialBatchSize; i++) + { + var task = processAsync(itemList[nextIndex++], cancellationToken); + activeTasks.Add(task); + } + + // Process completions and start new tasks until all work is done + while (activeTasks.Count > 0) + { + cancellationToken.ThrowIfCancellationRequested(); + + var completedTask = await Task.WhenAny(activeTasks) + .ConfigureAwait(continueOnCapturedContext: false); + + // Propagate exceptions (fail-fast behavior by default) + // Caller's processAsync function should handle failure policy if needed + await completedTask + .ConfigureAwait(continueOnCapturedContext: false); + + activeTasks.Remove(completedTask); + + int itemsCompleted = nextIndex - activeTasks.Count; + Logger.GetLogger(typeof(TaskHelpers)).DebugFormat("TaskHelpers.ForEachWithConcurrencyAsync: Task completed (Active={0}, Completed={1}/{2}, Remaining={3})", + activeTasks.Count, itemsCompleted, itemList.Count, itemList.Count - itemsCompleted); + + // Start next task if more work remains + if (nextIndex < itemList.Count) + { + Logger.GetLogger(typeof(TaskHelpers)).DebugFormat("TaskHelpers.ForEachWithConcurrencyAsync: Starting next task (Index={0}/{1}, Active={2})", + nextIndex + 1, itemList.Count, activeTasks.Count + 1); + var nextTask = processAsync(itemList[nextIndex++], cancellationToken); + activeTasks.Add(nextTask); + } + } + + Logger.GetLogger(typeof(TaskHelpers)).DebugFormat("TaskHelpers.ForEachWithConcurrencyAsync: All items processed (Total={0})", itemList.Count); + } + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/UploadDirectoryCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/UploadDirectoryCommand.cs index e4be9b27aa74..b6c884a8361f 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/UploadDirectoryCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/UploadDirectoryCommand.cs @@ -20,6 +20,7 @@ * */ using System; +using System.Collections.Concurrent; using System.Collections.Generic; using System.IO; using System.Text; @@ -32,22 +33,65 @@ namespace Amazon.S3.Transfer.Internal /// This command files all the files that meets the criteria specified in the TransferUtilityUploadDirectoryRequest request /// and uploads them. 
/// - internal partial class UploadDirectoryCommand : BaseCommand + internal partial class UploadDirectoryCommand : BaseCommand { + private IFailurePolicy _failurePolicy; + private ConcurrentBag _errors = new ConcurrentBag(); TransferUtilityUploadDirectoryRequest _request; TransferUtility _utility; TransferUtilityConfig _config; int _totalNumberOfFiles; int _numberOfFilesUploaded; + int _numberOfFilesSuccessfullyUploaded; long _totalBytes; - long _transferredBytes; + long _transferredBytes; + + #region Event Firing Methods + + private void FireTransferInitiatedEvent() + { + var eventArgs = new UploadDirectoryInitiatedEventArgs( + _request, + _totalNumberOfFiles, + _totalBytes); + _request.OnRaiseUploadDirectoryInitiatedEvent(eventArgs); + } + + private void FireTransferCompletedEvent(TransferUtilityUploadDirectoryResponse response) + { + var eventArgs = new UploadDirectoryCompletedEventArgs( + _request, + response, + _numberOfFilesSuccessfullyUploaded, + _totalNumberOfFiles, + Interlocked.Read(ref _transferredBytes), + _totalBytes); + _request.OnRaiseUploadDirectoryCompletedEvent(eventArgs); + } + + private void FireTransferFailedEvent() + { + var eventArgs = new UploadDirectoryFailedEventArgs( + _request, + _numberOfFilesSuccessfullyUploaded, + _totalNumberOfFiles, + Interlocked.Read(ref _transferredBytes), + _totalBytes); + _request.OnRaiseUploadDirectoryFailedEvent(eventArgs); + } + + #endregion internal UploadDirectoryCommand(TransferUtility utility, TransferUtilityConfig config, TransferUtilityUploadDirectoryRequest request) { this._utility = utility; this._request = request; this._config = config; + _failurePolicy = + request.FailurePolicy == FailurePolicy.AbortOnFailure + ? new AbortOnFailurePolicy() + : new ContinueOnFailurePolicy(_errors); } internal TransferUtilityUploadRequest ConstructRequest(string basePath, string filepath, string prefix) diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/AbortMultipartUploadsCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/AbortMultipartUploadsCommand.async.cs index 4e55afcd34e8..649c290fac2f 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/AbortMultipartUploadsCommand.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/AbortMultipartUploadsCommand.async.cs @@ -24,10 +24,10 @@ namespace Amazon.S3.Transfer.Internal { - internal partial class AbortMultipartUploadsCommand : BaseCommand + internal partial class AbortMultipartUploadsCommand : BaseCommand { - public override async Task ExecuteAsync(CancellationToken cancellationToken) + public override async Task ExecuteAsync(CancellationToken cancellationToken) { if (string.IsNullOrEmpty(this._request.BucketName)) { @@ -82,8 +82,10 @@ await asyncThrottler.WaitAsync(cancellationToken) } while (listResponse.IsTruncated.GetValueOrDefault()); - await WhenAllOrFirstExceptionAsync(pendingTasks,cancellationToken) + await TaskHelpers.WhenAllOrFirstExceptionAsync(pendingTasks,cancellationToken) .ConfigureAwait(continueOnCapturedContext: false); + + return new TransferUtilityAbortMultipartUploadsResponse(); } finally { diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/BaseCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/BaseCommand.async.cs index f9591f6d1d68..a7a58a4b02c5 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/BaseCommand.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/BaseCommand.async.cs @@ -13,94 +13,16 @@ * permissions and limitations under the License. 
*/ -using Amazon.S3.Model; -using System; -using System.Collections.Generic; -using System.IO; -using System.Linq; -using System.Text; using System.Threading; using System.Threading.Tasks; namespace Amazon.S3.Transfer.Internal { - internal abstract partial class BaseCommand + internal abstract partial class BaseCommand where TResponse : class { - public abstract Task ExecuteAsync(CancellationToken cancellationToken); - - /// - /// Waits for all of the tasks to complete or till any task fails or is canceled. - /// - protected static async Task> WhenAllOrFirstExceptionAsync(List> pendingTasks, CancellationToken cancellationToken) - { - int processed = 0; - int total = pendingTasks.Count; - var responses = new List(); - while (processed < total) - { - cancellationToken.ThrowIfCancellationRequested(); - - var completedTask = await Task.WhenAny(pendingTasks) - .ConfigureAwait(continueOnCapturedContext: false); - - //If RanToCompletion a response will be returned - //If Faulted or Canceled an appropriate exception will be thrown - var response = await completedTask - .ConfigureAwait(continueOnCapturedContext: false); - responses.Add(response); - - pendingTasks.Remove(completedTask); - processed++; - } - - return responses; - } - /// - /// Waits for all of the tasks to complete or till any task fails or is canceled. - /// - protected static async Task WhenAllOrFirstExceptionAsync(List pendingTasks, CancellationToken cancellationToken) - { - int processed = 0; - int total = pendingTasks.Count; - while (processed < total) - { - cancellationToken.ThrowIfCancellationRequested(); - - var completedTask = await Task.WhenAny(pendingTasks) - .ConfigureAwait(continueOnCapturedContext: false); - - //If RanToCompletion a response will be returned - //If Faulted or Canceled an appropriate exception will be thrown - await completedTask - .ConfigureAwait(continueOnCapturedContext: false); - - pendingTasks.Remove(completedTask); - processed++; - } - } - - protected static async Task ExecuteCommandAsync(BaseCommand command, CancellationTokenSource internalCts, SemaphoreSlim throttler) - { - try - { - await command.ExecuteAsync(internalCts.Token) - .ConfigureAwait(continueOnCapturedContext: false); - } - catch (Exception exception) - { - if (!(exception is OperationCanceledException)) - { - // Cancel scheduling any more tasks. - // Cancel other upload requests. 
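The helpers deleted here are not gone; the call sites below switch to TaskHelpers.WhenAllOrFirstExceptionAsync, the same fail-fast drain relocated to a shared static class so commands no longer need a common base implementation for it. The core pattern, restated as a standalone sketch:

using System.Collections.Generic;
using System.Threading;
using System.Threading.Tasks;

internal static class FailFastDrainSketch
{
    // Await tasks one completion at a time so the first faulted or canceled
    // task surfaces its exception immediately, rather than after all finish.
    internal static async Task<List<T>> WhenAllOrFirstExceptionAsync<T>(List<Task<T>> pending, CancellationToken ct)
    {
        var responses = new List<T>(pending.Count);
        while (pending.Count > 0)
        {
            ct.ThrowIfCancellationRequested();
            var completed = await Task.WhenAny(pending).ConfigureAwait(false);
            responses.Add(await completed.ConfigureAwait(false)); // rethrows on fault/cancel
            pending.Remove(completed);
        }
        return responses;
    }
}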
- internalCts.Cancel(); - } - throw; - } - finally - { - throttler.Release(); - } - } + /// <summary> + /// Executes the command and returns a typed response + /// </summary> + public abstract Task<TResponse> ExecuteAsync(CancellationToken cancellationToken); } } diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/DownloadCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/DownloadCommand.async.cs index 3e536a4bb607..1249916d931c 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/DownloadCommand.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/DownloadCommand.async.cs @@ -28,17 +28,22 @@ namespace Amazon.S3.Transfer.Internal { - internal partial class DownloadCommand : BaseCommand + internal partial class DownloadCommand : BaseCommand<TransferUtilityDownloadResponse> { - public override async Task ExecuteAsync(CancellationToken cancellationToken) + public override async Task<TransferUtilityDownloadResponse> ExecuteAsync(CancellationToken cancellationToken) { + FireTransferInitiatedEvent(); + ValidateRequest(); + GetObjectRequest getRequest = ConvertToGetObjectRequest(this._request); var maxRetries = _s3Client.Config.MaxErrorRetry; var retries = 0; bool shouldRetry = false; string mostRecentETag = null; + TransferUtilityDownloadResponse lastSuccessfulMappedResponse = null; + long? totalBytesFromResponse = null; // Track total bytes once we have response headers do { shouldRetry = false; @@ -54,12 +59,16 @@ public override async Task ExecuteAsync(CancellationToken cancellationToken) using (var response = await this._s3Client.GetObjectAsync(getRequest, cancellationToken) .ConfigureAwait(continueOnCapturedContext: false)) { + // Capture total bytes from response headers as soon as we get them + totalBytesFromResponse = response.ContentLength; + if (!string.IsNullOrEmpty(mostRecentETag) && !string.Equals(mostRecentETag, response.ETag)) { //if the eTag changed, we need to retry from the start of the file mostRecentETag = response.ETag; getRequest.ByteRange = null; retries = 0; + Interlocked.Exchange(ref _totalTransferredBytes, 0); shouldRetry = true; WaitBeforeRetry(retries); continue; @@ -101,6 +110,8 @@ await response.WriteResponseStreamToFileAsync(this._request.FilePath, false, can await response.WriteResponseStreamToFileAsync(this._request.FilePath, true, cancellationToken) .ConfigureAwait(continueOnCapturedContext: false); } + + lastSuccessfulMappedResponse = ResponseMapper.MapGetObjectResponse(response); } } catch (Exception exception) { @@ -109,6 +120,9 @@ await response.WriteResponseStreamToFileAsync(this._request.FilePath, true, canc shouldRetry = HandleExceptionForHttpClient(exception, retries, maxRetries); if (!shouldRetry) { + // Pass total bytes if we have them from response headers, otherwise -1 for unknown + FireTransferFailedEvent(this._request.FilePath, Interlocked.Read(ref _totalTransferredBytes), totalBytesFromResponse ?? -1); + if (exception is IOException) { throw; @@ -130,6 +144,16 @@ await response.WriteResponseStreamToFileAsync(this._request.FilePath, true, canc } WaitBeforeRetry(retries); } while (shouldRetry); + + // This should never happen under normal logic flow since we always throw exception on error. + if (lastSuccessfulMappedResponse == null) + { + throw new InvalidOperationException("Download completed without any successful response. This indicates a logical error in the retry handling."); + } + + FireTransferCompletedEvent(lastSuccessfulMappedResponse, this._request.FilePath, Interlocked.Read(ref _totalTransferredBytes), totalBytesFromResponse ??
-1); + + return lastSuccessfulMappedResponse; } private static bool HandleExceptionForHttpClient(Exception exception, int retries, int maxRetries) diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartDownloadCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartDownloadCommand.async.cs new file mode 100644 index 000000000000..c0b0a99c8709 --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartDownloadCommand.async.cs @@ -0,0 +1,134 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ +using System; +using System.Threading; +using System.Threading.Tasks; +using Amazon.S3.Model; + +namespace Amazon.S3.Transfer.Internal +{ + internal partial class MultipartDownloadCommand : BaseCommand + { + /// + public override async Task ExecuteAsync(CancellationToken cancellationToken) + { + // Validate request parameters + ValidateRequest(); + + // Fire initiated event before starting any network operations + FireTransferInitiatedEvent(); + + // Create configuration from request settings + var config = CreateConfiguration(); + + _logger.DebugFormat("MultipartDownloadCommand: Configuration - ConcurrentServiceRequests={0}, BufferSize={1}, TargetPartSize={2}", + config.ConcurrentServiceRequests, + config.BufferSize, + config.TargetPartSizeBytes + ); + + // Create data handler for writing parts to disk + using (var dataHandler = new FilePartDataHandler(config)) + { + // Create coordinator to manage the download process + // Pass shared HTTP throttler to control concurrency across files + using (var coordinator = new MultipartDownloadManager( + _s3Client, + _request, + config, + dataHandler, + RequestEventHandler, + _sharedHttpThrottler)) + { + long totalBytes = -1; + try + { + // Start unified download operation (discovers strategy and starts downloads) + _logger.DebugFormat("MultipartDownloadCommand: Starting unified download operation"); + var downloadResult = await coordinator.StartDownloadAsync(DownloadPartProgressEventCallback, cancellationToken) + .ConfigureAwait(false); + + totalBytes = downloadResult.ObjectSize; + + _logger.DebugFormat("MultipartDownloadCommand: Downloaded {0} part(s), total size: {1} bytes, IsSinglePart={2}", + downloadResult.TotalParts, downloadResult.ObjectSize, downloadResult.IsSinglePart); + + // Wait for all downloads to complete before returning + // This ensures file is fully written and committed for file-based downloads + // For stream-based downloads, this task completes immediately (no-op) + _logger.DebugFormat("MultipartDownloadCommand: Waiting for download completion"); + await coordinator.DownloadCompletionTask.ConfigureAwait(false); + + _logger.DebugFormat("MultipartDownloadCommand: 
Completed multipart download"); + + // Map the response from the initial GetObject response + // The initial response contains all the metadata we need + var mappedResponse = ResponseMapper.MapGetObjectResponse(downloadResult.InitialResponse); + + // SEP Part GET Step 7 / Ranged GET Step 9: + // Set ContentLength to total object size (not just first part) + mappedResponse.Headers.ContentLength = downloadResult.ObjectSize; + + // Set ContentRange to represent the entire object: bytes 0-(ContentLength-1)/ContentLength + // S3 returns null for 0-byte objects, so we match that behavior + if (downloadResult.ObjectSize == 0) + { + mappedResponse.ContentRange = null; + } + else + { + mappedResponse.ContentRange = $"bytes 0-{downloadResult.ObjectSize - 1}/{downloadResult.ObjectSize}"; + } + + // SEP Part GET Step 7 / Ranged GET Step 9: + // Handle composite checksums for multipart objects + // Per spec: "If ChecksumType is COMPOSITE, set all checksum value members to null + // as the checksum value returned from a part GET request is not the composite + // checksum for the entire object" + if (mappedResponse.ChecksumType == ChecksumType.COMPOSITE) + { + mappedResponse.ChecksumCRC32 = null; + mappedResponse.ChecksumCRC32C = null; + mappedResponse.ChecksumCRC64NVME = null; + mappedResponse.ChecksumSHA1 = null; + mappedResponse.ChecksumSHA256 = null; + } + + // Fire completed event + FireTransferCompletedEvent(mappedResponse, totalBytes); + + return mappedResponse; + } + catch (Exception ex) + { + _logger.Error(ex, "Exception during multipart download"); + + // Fire failed event + FireTransferFailedEvent(totalBytes); + + throw; + } + } + } + } + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartUploadCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartUploadCommand.async.cs index 8dbb8ba561e7..9c6983f9bdc0 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartUploadCommand.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartUploadCommand.async.cs @@ -27,32 +27,44 @@ namespace Amazon.S3.Transfer.Internal { - internal partial class MultipartUploadCommand : BaseCommand + internal partial class MultipartUploadCommand : BaseCommand { public SemaphoreSlim AsyncThrottler { get; set; } Dictionary _expectedUploadParts = new Dictionary(); - public override async Task ExecuteAsync(CancellationToken cancellationToken) + public override async Task ExecuteAsync(CancellationToken cancellationToken) { + // Fire transfer initiated event FIRST, before choosing path + FireTransferInitiatedEvent(); + if ( (this._fileTransporterRequest.InputStream != null && !this._fileTransporterRequest.InputStream.CanSeek) || this._fileTransporterRequest.ContentLength == -1) { - await UploadUnseekableStreamAsync(this._fileTransporterRequest, cancellationToken).ConfigureAwait(false); + return await UploadUnseekableStreamAsync(this._fileTransporterRequest, cancellationToken).ConfigureAwait(false); } else { - var initRequest = ConstructInitiateMultipartUploadRequest(); - var initResponse = await _s3Client.InitiateMultipartUploadAsync(initRequest, cancellationToken) + InitiateMultipartUploadResponse initResponse = null; + try + { + var initRequest = ConstructInitiateMultipartUploadRequest(); + initResponse = await _s3Client.InitiateMultipartUploadAsync(initRequest, cancellationToken) .ConfigureAwait(continueOnCapturedContext: false); - Logger.DebugFormat("Initiated upload: {0}", initResponse.UploadId); + _logger.DebugFormat("Initiated 
upload: {0}", initResponse.UploadId); + } + catch (Exception) + { + FireTransferFailedEvent(); + throw; + } var pendingUploadPartTasks = new List>(); - SemaphoreSlim localThrottler = null; CancellationTokenSource internalCts = null; + try { - Logger.DebugFormat("Queue up the UploadPartRequests to be executed"); + _logger.DebugFormat("Queue up the UploadPartRequests to be executed"); long filePosition = 0; for (int i = 1; filePosition < this._contentLength; i++) { @@ -88,7 +100,7 @@ public override async Task ExecuteAsync(CancellationToken cancellationToken) this._totalNumberOfParts = this._partsToUpload.Count; - Logger.DebugFormat("Scheduling the {0} UploadPartRequests in the queue", this._totalNumberOfParts); + _logger.DebugFormat("Scheduling the {0} UploadPartRequests in the queue", this._totalNumberOfParts); internalCts = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken); var concurrencyLevel = CalculateConcurrentServiceRequests(); @@ -120,20 +132,26 @@ await localThrottler.WaitAsync(cancellationToken) pendingUploadPartTasks.Add(task); } - Logger.DebugFormat("Waiting for upload part requests to complete. ({0})", initResponse.UploadId); - _uploadResponses = await WhenAllOrFirstExceptionAsync(pendingUploadPartTasks, cancellationToken) + _logger.DebugFormat("Waiting for upload part requests to complete. ({0})", initResponse.UploadId); + _uploadResponses = await TaskHelpers.WhenAllOrFirstExceptionAsync(pendingUploadPartTasks, cancellationToken) .ConfigureAwait(continueOnCapturedContext: false); - Logger.DebugFormat("Beginning completing multipart. ({0})", initResponse.UploadId); + _logger.DebugFormat("Beginning completing multipart. ({0})", initResponse.UploadId); var compRequest = ConstructCompleteMultipartUploadRequest(initResponse); - await this._s3Client.CompleteMultipartUploadAsync(compRequest, cancellationToken) + var completeResponse = await this._s3Client.CompleteMultipartUploadAsync(compRequest, cancellationToken) .ConfigureAwait(continueOnCapturedContext: false); - Logger.DebugFormat("Done completing multipart. ({0})", initResponse.UploadId); + _logger.DebugFormat("Done completing multipart. ({0})", initResponse.UploadId); + var mappedResponse = ResponseMapper.MapCompleteMultipartUploadResponse(completeResponse); + FireTransferCompletedEvent(mappedResponse); + return mappedResponse; } catch (Exception e) { - Logger.Error(e, "Exception while uploading. ({0})", initResponse.UploadId); + _logger.Error(e, "Exception while uploading. ({0})", initResponse?.UploadId ?? "unknown"); + + FireTransferFailedEvent(); + // Can't do async invocation in the catch block, doing cleanup synchronously. 
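The FireTransferInitiatedEvent / FireTransferCompletedEvent / FireTransferFailedEvent calls threaded through this method surface as public events on the upload request. A consumption sketch; the event names follow the Initiated/Completed/Failed pattern used by the directory requests later in this diff and are assumptions here, not declarations visible in this excerpt:

using System;
using Amazon.S3;
using Amazon.S3.Transfer;

internal static class UploadEventsSketch
{
    internal static async System.Threading.Tasks.Task RunAsync(IAmazonS3 s3Client)
    {
        var request = new TransferUtilityUploadRequest
        {
            BucketName = "amzn-s3-demo-bucket", // hypothetical bucket
            FilePath = @"C:\data\large.bin",
            Key = "large.bin"
        };

        // Assumed event names, by analogy with the directory-level events below.
        request.UploadInitiatedEvent += (s, e) => Console.WriteLine("Upload initiated");
        request.UploadCompletedEvent += (s, e) => Console.WriteLine("Upload completed");
        request.UploadFailedEvent += (s, e) => Console.WriteLine("Upload failed");

        await new TransferUtility(s3Client).UploadAsync(request);
    }
}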
Cleanup(initResponse.UploadId, pendingUploadPartTasks); throw; @@ -228,7 +246,7 @@ private void Cleanup(string uploadId, List> tasks) } catch(Exception exception) { - Logger.InfoFormat( + _logger.InfoFormat( "A timeout occured while waiting for all upload part request to complete as part of aborting the multipart upload : {0}", exception.Message); } @@ -255,10 +273,10 @@ private void AbortMultipartUpload(string uploadId) } catch (Exception e) { - Logger.InfoFormat("Error attempting to abort multipart for key {0}: {1}", this._fileTransporterRequest.Key, e.Message); + _logger.InfoFormat("Error attempting to abort multipart for key {0}: {1}", this._fileTransporterRequest.Key, e.Message); } } - private async Task UploadUnseekableStreamAsync(TransferUtilityUploadRequest request, CancellationToken cancellationToken = default(CancellationToken)) + private async Task UploadUnseekableStreamAsync(TransferUtilityUploadRequest request, CancellationToken cancellationToken = default(CancellationToken)) { cancellationToken.ThrowIfCancellationRequested(); @@ -274,8 +292,19 @@ private void AbortMultipartUpload(string uploadId) } }; - var initiateRequest = ConstructInitiateMultipartUploadRequest(requestEventHandler); - var initiateResponse = await _s3Client.InitiateMultipartUploadAsync(initiateRequest, cancellationToken).ConfigureAwait(false); + InitiateMultipartUploadResponse initiateResponse = null; + + try + { + var initiateRequest = ConstructInitiateMultipartUploadRequest(requestEventHandler); + initiateResponse = await _s3Client.InitiateMultipartUploadAsync(initiateRequest, cancellationToken).ConfigureAwait(false); + } + catch (Exception ex) + { + FireTransferFailedEvent(); + _logger.Error(ex, "Failed to initiate multipart upload for unseekable stream"); + throw; + } try { @@ -322,7 +351,7 @@ private void AbortMultipartUpload(string uploadId) UploadPartRequest uploadPartRequest = ConstructUploadPartRequestForNonSeekableStream(nextUploadBuffer, partNumber, partSize, isLastPart, initiateResponse); var partResponse = await _s3Client.UploadPartAsync(uploadPartRequest, cancellationToken).ConfigureAwait(false); - Logger.DebugFormat("Uploaded part {0}. (Last part = {1}, Part size = {2}, Upload Id: {3})", partNumber, isLastPart, partSize, initiateResponse.UploadId); + _logger.DebugFormat("Uploaded part {0}. (Last part = {1}, Part size = {2}, Upload Id: {3})", partNumber, isLastPart, partSize, initiateResponse.UploadId); uploadPartResponses.Add(partResponse); partNumber++; @@ -343,12 +372,18 @@ private void AbortMultipartUpload(string uploadId) this._uploadResponses = uploadPartResponses; CompleteMultipartUploadRequest compRequest = ConstructCompleteMultipartUploadRequest(initiateResponse, true, requestEventHandler); - await _s3Client.CompleteMultipartUploadAsync(compRequest, cancellationToken).ConfigureAwait(false); - Logger.DebugFormat("Completed multi part upload. (Part count: {0}, Upload Id: {1})", uploadPartResponses.Count, initiateResponse.UploadId); + var completeResponse = await _s3Client.CompleteMultipartUploadAsync(compRequest, cancellationToken).ConfigureAwait(false); + _logger.DebugFormat("Completed multi part upload. 
(Part count: {0}, Upload Id: {1})", uploadPartResponses.Count, initiateResponse.UploadId); + + var mappedResponse = ResponseMapper.MapCompleteMultipartUploadResponse(completeResponse); + FireTransferCompletedEvent(mappedResponse); + return mappedResponse; } } catch (Exception ex) { + FireTransferFailedEvent(); + await _s3Client.AbortMultipartUploadAsync(new AbortMultipartUploadRequest() { BucketName = request.BucketName, @@ -356,7 +391,7 @@ await _s3Client.AbortMultipartUploadAsync(new AbortMultipartUploadRequest() RequestPayer = request.RequestPayer, UploadId = initiateResponse.UploadId }).ConfigureAwait(false); - Logger.Error(ex, ex.Message); + _logger.Error(ex, ex.Message); throw; } } diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/OpenStreamCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/OpenStreamCommand.async.cs index 192560f837ee..414e9a1641d1 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/OpenStreamCommand.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/OpenStreamCommand.async.cs @@ -24,14 +24,15 @@ namespace Amazon.S3.Transfer.Internal { - internal partial class OpenStreamCommand : BaseCommand + internal partial class OpenStreamCommand : BaseCommand { - public override async Task ExecuteAsync(CancellationToken cancellationToken) + public override async Task ExecuteAsync(CancellationToken cancellationToken) { var getRequest = ConstructRequest(); var response = await _s3Client.GetObjectAsync(getRequest, cancellationToken) .ConfigureAwait(continueOnCapturedContext: false); _responseStream = response.ResponseStream; + return new TransferUtilityOpenStreamResponse(); } } } diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/OpenStreamWithResponseCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/OpenStreamWithResponseCommand.async.cs new file mode 100644 index 000000000000..3d4d3e197acc --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/OpenStreamWithResponseCommand.async.cs @@ -0,0 +1,90 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
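OpenStreamWithResponseCommand, introduced next, applies the same SEP post-mapping already seen in MultipartDownloadCommand: ContentLength is rewritten to the full object size, a whole-object ContentRange is synthesized (null for 0-byte objects, matching S3), and composite checksums are cleared because a part-level GET can never return the whole-object composite value. Distilled into a standalone helper over an assumed minimal response type (this is not the SDK's ResponseMapper):

// Minimal stand-in for the response fields the SEP rules touch.
internal sealed class MappedDownloadResponse
{
    public long ContentLength { get; set; }
    public string ContentRange { get; set; }
    public string ChecksumType { get; set; } // the SDK uses a ChecksumType constant; string here for brevity
    public string ChecksumCRC32 { get; set; }
    public string ChecksumSHA256 { get; set; }
}

internal static class SepMappingSketch
{
    internal static void ApplyWholeObjectSemantics(MappedDownloadResponse response, long objectSize)
    {
        // SEP Part GET Step 7 / Ranged GET Step 9: report the whole object, not part 1.
        response.ContentLength = objectSize;

        // "bytes 0-(N-1)/N"; S3 returns no Content-Range for 0-byte objects.
        response.ContentRange = objectSize == 0
            ? null
            : $"bytes 0-{objectSize - 1}/{objectSize}";

        // A part GET's checksum covers that part only, never the composite object checksum.
        if (response.ChecksumType == "COMPOSITE")
        {
            response.ChecksumCRC32 = null;
            response.ChecksumSHA256 = null;
        }
    }
}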
+ */ + +using Amazon.Runtime.Internal.Util; +using Amazon.S3.Model; +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Text; +using System.Threading; +using System.Threading.Tasks; + +namespace Amazon.S3.Transfer.Internal +{ + internal partial class OpenStreamWithResponseCommand : BaseCommand + { + private readonly Logger _logger = Logger.GetLogger(typeof(OpenStreamWithResponseCommand)); + + public override async Task ExecuteAsync(CancellationToken cancellationToken) + { + _logger.DebugFormat("OpenStreamWithResponseCommand: Creating BufferedMultipartStream with MultipartDownloadType={0}", + _request.MultipartDownloadType); + + _logger.DebugFormat("OpenStreamWithResponseCommand: Configuration - ConcurrentServiceRequests={0}, MaxInMemoryParts={1}, BufferSize={2}", + _config.ConcurrentServiceRequests, + _request.MaxInMemoryParts, + _s3Client.Config.BufferSize + ); + + var bufferedStream = BufferedMultipartStream.Create(_s3Client, _request, _config, this.RequestEventHandler); + await bufferedStream.InitializeAsync(cancellationToken).ConfigureAwait(false); + + // Populate metadata from the initial GetObject response (from discovery phase) + var discoveryResult = bufferedStream.DiscoveryResult; + + _logger.DebugFormat("OpenStreamWithResponseCommand: Stream initialized successfully - ObjectSize={0}, TotalParts={1}, IsSinglePart={2}", + discoveryResult.ObjectSize, + discoveryResult.TotalParts, + discoveryResult.IsSinglePart); + + var response = ResponseMapper.MapGetObjectResponseToOpenStream(discoveryResult.InitialResponse); + + // SEP Part GET Step 7 / Ranged GET Step 9: + // Set ContentLength to total object size (not just first part) + response.Headers.ContentLength = discoveryResult.ObjectSize; + + // Set ContentRange to represent the entire object: bytes 0-(ContentLength-1)/ContentLength + // S3 returns null for 0-byte objects, so we match that behavior + if (discoveryResult.ObjectSize == 0) + { + response.ContentRange = null; + } + else + { + response.ContentRange = $"bytes 0-{discoveryResult.ObjectSize - 1}/{discoveryResult.ObjectSize}"; + } + + // SEP Part GET Step 7 / Ranged GET Step 9: + // Handle composite checksums for multipart objects + // Per spec: "If ChecksumType is COMPOSITE, set all checksum value members to null + // as the checksum value returned from a part GET request is not the composite + // checksum for the entire object" + if (response.ChecksumType == ChecksumType.COMPOSITE) + { + response.ChecksumCRC32 = null; + response.ChecksumCRC32C = null; + response.ChecksumCRC64NVME = null; + response.ChecksumSHA1 = null; + response.ChecksumSHA256 = null; + } + + response.ResponseStream = bufferedStream; + return response; + + } + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/SimpleUploadCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/SimpleUploadCommand.async.cs index e4c94d65044f..3b350c1bd877 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/SimpleUploadCommand.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/SimpleUploadCommand.async.cs @@ -24,14 +24,16 @@ namespace Amazon.S3.Transfer.Internal { - internal partial class SimpleUploadCommand : BaseCommand + internal partial class SimpleUploadCommand : BaseCommand { public SemaphoreSlim AsyncThrottler { get; set; } - public override async Task ExecuteAsync(CancellationToken cancellationToken) + public override async Task ExecuteAsync(CancellationToken cancellationToken) { try { + 
FireTransferInitiatedEvent(); + if (AsyncThrottler != null) { await this.AsyncThrottler.WaitAsync(cancellationToken) @@ -39,8 +41,19 @@ await this.AsyncThrottler.WaitAsync(cancellationToken) } var putRequest = ConstructRequest(); - await _s3Client.PutObjectAsync(putRequest, cancellationToken) + var response = await _s3Client.PutObjectAsync(putRequest, cancellationToken) .ConfigureAwait(continueOnCapturedContext: false); + + var mappedResponse = ResponseMapper.MapPutObjectResponse(response); + + FireTransferCompletedEvent(mappedResponse); + + return mappedResponse; + } + catch (Exception) + { + FireTransferFailedEvent(); + throw; } finally { diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs index a160bc1504f4..85cb94ac4662 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs @@ -16,113 +16,332 @@ using Amazon.S3.Model; using Amazon.S3.Util; using System; +using System.Collections.Concurrent; using System.Collections.Generic; using System.IO; using System.Linq; using System.Text; using System.Threading; using System.Threading.Tasks; +using Amazon.Runtime; +using Amazon.Runtime.Internal.Util; +using Amazon.Util.Internal; namespace Amazon.S3.Transfer.Internal { - internal partial class DownloadDirectoryCommand : BaseCommand + internal partial class DownloadDirectoryCommand : BaseCommand { - TransferUtilityConfig _config; public bool DownloadFilesConcurrently { get; set; } + private readonly Logger _logger = Logger.GetLogger(typeof(DownloadDirectoryCommand)); + internal DownloadDirectoryCommand(IAmazonS3 s3Client, TransferUtilityDownloadDirectoryRequest request, TransferUtilityConfig config) - : this(s3Client, request) + : this(s3Client, request, config, useMultipartDownload: false) + { + } + + + public override async Task ExecuteAsync(CancellationToken cancellationToken) { - this._config = config; + try + { + FireTransferInitiatedEvent(); + + _logger.DebugFormat("DownloadDirectoryCommand.ExecuteAsync: Starting - DownloadFilesConcurrently={0}, UseMultipartDownload={1}, ConcurrentServiceRequests={2}", + DownloadFilesConcurrently, this._useMultipartDownload, this._config.ConcurrentServiceRequests); + + // Step 1: Validate and setup + ValidateRequest(); + EnsureDirectoryExists(new DirectoryInfo(this._request.LocalDirectory)); + + // Step 2: List S3 objects + var (s3Objects, prefixLength) = await ListS3ObjectsAsync(cancellationToken) + .ConfigureAwait(false); + + this._totalNumberOfFilesToDownload = s3Objects.Count; + _logger.DebugFormat("DownloadDirectoryCommand.ExecuteAsync: Found {0} total objects, TotalBytes={1}", + s3Objects.Count, this._totalBytes); + + // Step 3: Filter to actual files (exclude directory markers) + var objectsToDownload = FilterObjectsToDownload(s3Objects); + + // Step 4: Setup resources and execute downloads + using (var resources = CreateDownloadResources(cancellationToken)) + { + await ExecuteParallelDownloadsAsync( + objectsToDownload, + prefixLength, + resources, + cancellationToken) + .ConfigureAwait(false); + } + + // Step 5: Build response + _logger.DebugFormat("DownloadDirectoryCommand.ExecuteAsync: Completed - ObjectsDownloaded={0}, ObjectsFailed={1}", + _numberOfFilesDownloaded, _errors.Count); + + var response = BuildResponse(); + FireTransferCompletedEvent(response); + return 
response; + } + catch + { + FireTransferFailedEvent(); + throw; + } } - public override async Task ExecuteAsync(CancellationToken cancellationToken) + /// + /// Encapsulates disposable resources used during directory download. + /// + private sealed class DownloadResources : IDisposable { - ValidateRequest(); - EnsureDirectoryExists(new DirectoryInfo(this._request.LocalDirectory)); + public SemaphoreSlim HttpRequestThrottler { get; } + public CancellationTokenSource InternalCancellationTokenSource { get; } + + public DownloadResources( + SemaphoreSlim httpRequestThrottler, + CancellationTokenSource cancellationTokenSource) + { + HttpRequestThrottler = httpRequestThrottler; + InternalCancellationTokenSource = cancellationTokenSource; + } + public void Dispose() + { + InternalCancellationTokenSource?.Dispose(); + HttpRequestThrottler?.Dispose(); + } + } + + /// + /// Lists S3 objects to download and calculates prefix length. + /// + private async Task<(List objects, int prefixLength)> ListS3ObjectsAsync( + CancellationToken cancellationToken) + { List objs; string listRequestPrefix; + try { - ListObjectsRequest listRequest = ConstructListObjectRequest(); + var listRequest = ConstructListObjectRequest(); listRequestPrefix = listRequest.Prefix; - objs = await GetS3ObjectsToDownloadAsync(listRequest, cancellationToken).ConfigureAwait(false); + objs = await GetS3ObjectsToDownloadAsync(listRequest, cancellationToken) + .ConfigureAwait(false); } - catch (AmazonS3Exception ex) + catch (AmazonS3Exception ex) when (ex.StatusCode == System.Net.HttpStatusCode.NotImplemented) { - if (ex.StatusCode != System.Net.HttpStatusCode.NotImplemented) - throw; - - ListObjectsV2Request listRequestV2 = ConstructListObjectRequestV2(); + var listRequestV2 = ConstructListObjectRequestV2(); listRequestPrefix = listRequestV2.Prefix; - objs = await GetS3ObjectsToDownloadV2Async(listRequestV2, cancellationToken).ConfigureAwait(false); + objs = await GetS3ObjectsToDownloadV2Async(listRequestV2, cancellationToken) + .ConfigureAwait(false); } - this._totalNumberOfFilesToDownload = objs.Count; + // Calculate prefix length + int prefixLength = listRequestPrefix.Length; + if (_request.DisableSlashCorrection && !listRequestPrefix.EndsWith("/")) + { + prefixLength = listRequestPrefix.LastIndexOf("/") + 1; + } - SemaphoreSlim asyncThrottler = null; - CancellationTokenSource internalCts = null; + return (objs, prefixLength); + } - try + /// + /// Filters out directory markers (keys ending with "/") from S3 objects list. + /// + private List FilterObjectsToDownload(List s3Objects) + { + var filtered = s3Objects + .Where(s3o => !s3o.Key.EndsWith("/", StringComparison.Ordinal)) + .ToList(); + + _logger.DebugFormat("DownloadDirectoryCommand.FilterObjectsToDownload: Filtered to {0} files to download (excluded {1} directory markers)", + filtered.Count, s3Objects.Count - filtered.Count); + + return filtered; + } + + /// + /// Creates resources needed for parallel downloads with proper throttling. 
+ /// Throttling architecture: + /// - Task pool pattern (ForEachWithConcurrencyAsync): Controls concurrent file downloads + /// - HttpRequestThrottler: Controls total HTTP requests across ALL file downloads + /// + /// Example with ConcurrentServiceRequests = 10: + /// - Task pool creates max 10 concurrent file download tasks + /// - HttpRequestThrottler = 10: All files share 10 total HTTP request slots + /// - Without HTTP throttler: 10 multipart files × 10 parts = 100 concurrent HTTP requests + /// - With HTTP throttler: Enforces 10 total concurrent HTTP requests across all files + /// + /// This prevents resource exhaustion when downloading many large files with multipart downloads. + /// + private DownloadResources CreateDownloadResources(CancellationToken cancellationToken) + { + SemaphoreSlim httpRequestThrottler = null; + + // HTTP-level throttler: Shared across all downloads to control total HTTP concurrency + // Only needed for multipart downloads where each file makes multiple HTTP requests + if (this._useMultipartDownload) { - asyncThrottler = DownloadFilesConcurrently ? - new SemaphoreSlim(this._config.ConcurrentServiceRequests) : - new SemaphoreSlim(1); + httpRequestThrottler = new SemaphoreSlim(this._config.ConcurrentServiceRequests); + _logger.DebugFormat("DownloadDirectoryCommand.CreateDownloadResources: Created HTTP throttler with MaxConcurrentRequests={0}", + this._config.ConcurrentServiceRequests); + } + + var internalCts = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken); + + return new DownloadResources(httpRequestThrottler, internalCts); + } + + /// + /// Executes parallel downloads of all S3 objects using task pool pattern. + /// Only creates as many tasks as the concurrency limit allows (not all files up front). + /// + private async Task ExecuteParallelDownloadsAsync( + List objectsToDownload, + int prefixLength, + DownloadResources resources, + CancellationToken cancellationToken) + { + int concurrencyLevel = DownloadFilesConcurrently + ? this._config.ConcurrentServiceRequests + : 1; - internalCts = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken); - var pendingTasks = new List(); - foreach (S3Object s3o in objs) + _logger.DebugFormat("DownloadDirectoryCommand.ExecuteParallelDownloadsAsync: Starting task pool with ConcurrencyLevel={0}, TotalFiles={1}", + concurrencyLevel, objectsToDownload.Count); + + await TaskHelpers.ForEachWithConcurrencyAsync( + objectsToDownload, + concurrencyLevel, + async (s3Object, ct) => { - if (s3o.Key.EndsWith("/", StringComparison.Ordinal)) - continue; + ct.ThrowIfCancellationRequested(); - await asyncThrottler.WaitAsync(cancellationToken) - .ConfigureAwait(continueOnCapturedContext: false); + await DownloadSingleFileAsync( + s3Object, + prefixLength, + resources.HttpRequestThrottler, + resources.InternalCancellationTokenSource) + .ConfigureAwait(false); + }, + cancellationToken) + .ConfigureAwait(false); - cancellationToken.ThrowIfCancellationRequested(); - if (internalCts.IsCancellationRequested) - { - // Operation cancelled as one of the download requests failed with an exception, - // don't schedule any more download tasks. - // Don't throw an OperationCanceledException here as we want to process the - // responses and throw the original exception. 
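The throttling description above reduces to two nested semaphores: an outer gate on how many files are transferring at once, and a single shared inner gate on how many HTTP requests are in flight across all of them. A self-contained sketch of that shape (names and the Task.Delay stand-in are illustrative):

using System.Collections.Generic;
using System.Threading;
using System.Threading.Tasks;

internal static class TwoLevelThrottlingSketch
{
    internal static async Task RunAsync(IReadOnlyList<string> files, int concurrentServiceRequests, CancellationToken ct)
    {
        using (var httpThrottler = new SemaphoreSlim(concurrentServiceRequests)) // shared across ALL files
        using (var fileGate = new SemaphoreSlim(concurrentServiceRequests))      // file-level task pool
        {
            var tasks = new List<Task>();
            foreach (var file in files)
            {
                await fileGate.WaitAsync(ct).ConfigureAwait(false);
                tasks.Add(Task.Run(async () =>
                {
                    try
                    {
                        // Every multipart part request takes one shared HTTP slot, so
                        // 10 files x 10 parts still means only 10 requests in flight.
                        await httpThrottler.WaitAsync(ct).ConfigureAwait(false);
                        try { await Task.Delay(10, ct).ConfigureAwait(false); } // stand-in for one HTTP call
                        finally { httpThrottler.Release(); }
                    }
                    finally { fileGate.Release(); }
                }, ct));
            }
            await Task.WhenAll(tasks).ConfigureAwait(false);
        }
    }
}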
- break; - } + _logger.DebugFormat("DownloadDirectoryCommand.ExecuteParallelDownloadsAsync: Task pool completed - ObjectsDownloaded={0}, ObjectsFailed={1}", + _numberOfFilesDownloaded, _errors.Count); + } - // Valid for serial uploads when - // TransferUtilityDownloadDirectoryRequest.DownloadFilesConcurrently is set to false. - int prefixLength = listRequestPrefix.Length; + /// + /// Downloads a single S3 object to local file system with validation and failure handling. + /// + private async Task DownloadSingleFileAsync( + S3Object s3Object, + int prefixLength, + SemaphoreSlim httpRequestThrottler, + CancellationTokenSource internalCts) + { + if (internalCts.IsCancellationRequested) + return; - // If DisableSlashCorrection is enabled (i.e. S3Directory is a key prefix) and it doesn't end with '/' then we need the parent directory to properly construct download path. - if (_request.DisableSlashCorrection && !listRequestPrefix.EndsWith("/")) - { - prefixLength = listRequestPrefix.LastIndexOf("/") + 1; - } + this._currentFile = s3Object.Key.Substring(prefixLength); + var downloadRequest = ConstructTransferUtilityDownloadRequest(s3Object, prefixLength); - this._currentFile = s3o.Key.Substring(prefixLength); + // Create failure callback + Action onFailure = (ex) => + { + this._request.OnRaiseObjectDownloadFailedEvent( + new ObjectDownloadFailedEventArgs( + this._request, + downloadRequest, + ex)); + }; - var downloadRequest = ConstructTransferUtilityDownloadRequest(s3o, prefixLength); - var command = new DownloadCommand(this._s3Client, downloadRequest); + // Validate file path with failure policy + var isValid = await _failurePolicy.ExecuteAsync( + () => ValidateDownloadPath(downloadRequest.FilePath), + onFailure, + internalCts + ).ConfigureAwait(false); - var task = ExecuteCommandAsync(command, internalCts, asyncThrottler); - pendingTasks.Add(task); - } - await WhenAllOrFirstExceptionAsync(pendingTasks, cancellationToken) - .ConfigureAwait(continueOnCapturedContext: false); + if (!isValid) + return; + + // Execute download with failure policy + await _failurePolicy.ExecuteAsync( + () => ExecuteDownloadCommandAsync(downloadRequest, httpRequestThrottler, internalCts.Token), + onFailure, + internalCts + ).ConfigureAwait(false); + } + + /// + /// Validates that the download path is within the target directory. + /// + private Task ValidateDownloadPath(string filePath) + { + if (!InternalSDKUtils.IsFilePathRootedWithDirectoryPath(filePath, _request.LocalDirectory)) + { + throw new AmazonClientException( + $"The file {filePath} is not allowed outside of the target directory {_request.LocalDirectory}."); + } + return Task.CompletedTask; + } + + /// + /// Creates and executes the appropriate download command for the file. + /// + private async Task ExecuteDownloadCommandAsync( + TransferUtilityDownloadRequest downloadRequest, + SemaphoreSlim httpRequestThrottler, + CancellationToken cancellationToken) + { + BaseCommand command; + + if (this._useMultipartDownload) + { + command = new MultipartDownloadCommand( + this._s3Client, + downloadRequest, + this._config, + httpRequestThrottler); } - finally + else { - internalCts.Dispose(); - asyncThrottler.Dispose(); + command = new DownloadCommand(this._s3Client, downloadRequest); } + + await command.ExecuteAsync(cancellationToken).ConfigureAwait(false); } + /// + /// Builds the response object based on download results. 
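ValidateDownloadPath above delegates to InternalSDKUtils.IsFilePathRootedWithDirectoryPath so that an S3 key such as "../../etc/passwd" cannot write outside the target directory. That utility is not part of this diff; an equivalent check can be sketched with Path.GetFullPath (a simplification, not the SDK's implementation):

using System;
using System.IO;

internal static class PathGuardSketch
{
    // True only if filePath resolves to a location inside directoryPath.
    internal static bool IsRootedWithin(string filePath, string directoryPath)
    {
        var fullFile = Path.GetFullPath(filePath);
        var fullDir = Path.GetFullPath(directoryPath);

        // Append a trailing separator so "C:\data-evil" is not treated as inside "C:\data".
        if (!fullDir.EndsWith(Path.DirectorySeparatorChar.ToString(), StringComparison.Ordinal))
            fullDir += Path.DirectorySeparatorChar;

        // OrdinalIgnoreCase suits Windows; use Ordinal on case-sensitive file systems.
        return fullFile.StartsWith(fullDir, StringComparison.OrdinalIgnoreCase);
    }
}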
+ /// + private TransferUtilityDownloadDirectoryResponse BuildResponse() + { + return new TransferUtilityDownloadDirectoryResponse + { + ObjectsDownloaded = _numberOfFilesDownloaded, + ObjectsFailed = _errors.Count, + Errors = _errors.ToList(), + Result = _errors.Count == 0 + ? DirectoryResult.Success + : (_numberOfFilesDownloaded > 0 + ? DirectoryResult.PartialSuccess + : DirectoryResult.Failure) + }; + } + + private async Task> GetS3ObjectsToDownloadAsync(ListObjectsRequest listRequest, CancellationToken cancellationToken) { + _logger.DebugFormat("DownloadDirectoryCommand.GetS3ObjectsToDownloadAsync: Starting object listing"); + List objs = new List(); + int pageCount = 0; do { ListObjectsResponse listResponse = await this._s3Client.ListObjectsAsync(listRequest, cancellationToken) @@ -140,13 +359,24 @@ private async Task> GetS3ObjectsToDownloadAsync(ListObjectsReques } } listRequest.Marker = listResponse.NextMarker; + pageCount++; + + _logger.DebugFormat("DownloadDirectoryCommand.GetS3ObjectsToDownloadAsync: Page {0} completed - ObjectsInPage={1}, TotalObjectsSoFar={2}", + pageCount, listResponse.S3Objects?.Count ?? 0, objs.Count); } while (!string.IsNullOrEmpty(listRequest.Marker)); + + _logger.DebugFormat("DownloadDirectoryCommand.GetS3ObjectsToDownloadAsync: Listing completed - TotalPages={0}, TotalObjects={1}", + pageCount, objs.Count); + return objs; } private async Task> GetS3ObjectsToDownloadV2Async(ListObjectsV2Request listRequestV2, CancellationToken cancellationToken) { + _logger.DebugFormat("DownloadDirectoryCommand.GetS3ObjectsToDownloadV2Async: Starting object listing (V2 API)"); + List objs = new List(); + int pageCount = 0; do { ListObjectsV2Response listResponse = await this._s3Client.ListObjectsV2Async(listRequestV2, cancellationToken) @@ -164,7 +394,15 @@ private async Task> GetS3ObjectsToDownloadV2Async(ListObjectsV2Re } } listRequestV2.ContinuationToken = listResponse.NextContinuationToken; + pageCount++; + + _logger.DebugFormat("DownloadDirectoryCommand.GetS3ObjectsToDownloadV2Async: Page {0} completed - ObjectsInPage={1}, TotalObjectsSoFar={2}", + pageCount, listResponse.S3Objects?.Count ?? 
0, objs.Count); } while (!string.IsNullOrEmpty(listRequestV2.ContinuationToken)); + + _logger.DebugFormat("DownloadDirectoryCommand.GetS3ObjectsToDownloadV2Async: Listing completed - TotalPages={0}, TotalObjects={1}", + pageCount, objs.Count); + return objs; } } diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/UploadDirectoryCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/UploadDirectoryCommand.cs index 75e1744d5435..713fd9e04e32 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/UploadDirectoryCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/UploadDirectoryCommand.cs @@ -20,87 +20,257 @@ using System.Text; using System.Threading; using System.Threading.Tasks; +using Amazon.Runtime.Internal.Util; namespace Amazon.S3.Transfer.Internal { - internal partial class UploadDirectoryCommand : BaseCommand + internal partial class UploadDirectoryCommand : BaseCommand { public bool UploadFilesConcurrently { get; set; } + private readonly Logger _logger = Logger.GetLogger(typeof(UploadDirectoryCommand)); - public override async Task ExecuteAsync(CancellationToken cancellationToken) + public override async Task ExecuteAsync(CancellationToken cancellationToken) { - string prefix = GetKeyPrefix(); + try + { + // Step 1: Setup paths and discover files + string prefix = GetKeyPrefix(); + string basePath = new DirectoryInfo(this._request.Directory).FullName; - string basePath = new DirectoryInfo(this._request.Directory).FullName; + _logger.DebugFormat("UploadDirectoryCommand.ExecuteAsync: Starting - BasePath={0}, Prefix={1}, UploadFilesConcurrently={2}, ConcurrentServiceRequests={3}", + basePath, prefix, UploadFilesConcurrently, this._config.ConcurrentServiceRequests); - string[] filePaths = await GetFiles(basePath, this._request.SearchPattern, this._request.SearchOption, cancellationToken) - .ConfigureAwait(continueOnCapturedContext: false); - this._totalNumberOfFiles = filePaths.Length; + // Step 2: Discover files to upload + string[] filePaths = await DiscoverFilesAsync(basePath, cancellationToken) + .ConfigureAwait(false); - SemaphoreSlim asyncThrottler = null; - SemaphoreSlim loopThrottler = null; - CancellationTokenSource internalCts = null; - try + this._totalNumberOfFiles = filePaths.Length; + _logger.DebugFormat("UploadDirectoryCommand.ExecuteAsync: Discovered {0} file(s) to upload. TotalBytes={1}", + _totalNumberOfFiles, _totalBytes); + + FireTransferInitiatedEvent(); + + + // Step 3: Setup resources and execute uploads + using (var resources = CreateUploadResources(cancellationToken)) + { + await ExecuteParallelUploadsAsync( + filePaths, + basePath, + prefix, + resources, + cancellationToken) + .ConfigureAwait(false); + } + + // Step 4: Build and return response + _logger.DebugFormat("UploadDirectoryCommand.ExecuteAsync: Completed - FilesSuccessfullyUploaded={0}, FilesFailed={1}", + _numberOfFilesSuccessfullyUploaded, _errors.Count); + + var response = BuildResponse(); + FireTransferCompletedEvent(response); + return response; + } + catch { - var pendingTasks = new List(); - loopThrottler = UploadFilesConcurrently ? - new SemaphoreSlim(this._config.ConcurrentServiceRequests) : - new SemaphoreSlim(1); - - asyncThrottler = this._utility.S3Client is Amazon.S3.Internal.IAmazonS3Encryption ? - // If we are using AmazonS3EncryptionClient, don't set the async throttler. - // The loopThrottler will be used to control how many files are uploaded in parallel. 
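Both listing helpers follow the standard S3 pagination contract: the V1 API loops on NextMarker, the V2 API on NextContinuationToken, until the token comes back empty. The V2 loop in isolation, using only the public IAmazonS3 surface:

using System.Collections.Generic;
using System.Threading;
using System.Threading.Tasks;
using Amazon.S3;
using Amazon.S3.Model;

internal static class ListingSketch
{
    internal static async Task<List<S3Object>> ListAllAsync(IAmazonS3 s3, string bucket, string prefix, CancellationToken ct)
    {
        var objects = new List<S3Object>();
        var request = new ListObjectsV2Request { BucketName = bucket, Prefix = prefix };
        ListObjectsV2Response response;
        do
        {
            response = await s3.ListObjectsV2Async(request, ct).ConfigureAwait(false);
            if (response.S3Objects != null)
                objects.AddRange(response.S3Objects);

            // An empty or null continuation token means the listing is complete.
            request.ContinuationToken = response.NextContinuationToken;
        } while (!string.IsNullOrEmpty(request.ContinuationToken));
        return objects;
    }
}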
- // Each upload (multipart) will upload parts serially. - null : - // Use a throttler which will be shared between simple and multipart uploads - // to control concurrent IO. - new SemaphoreSlim(this._config.ConcurrentServiceRequests); - - - internalCts = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken); - foreach (string filepath in filePaths) + FireTransferFailedEvent(); + throw; + } + } + + /// + /// Encapsulates disposable resources used during directory upload. + /// + private sealed class UploadResources : IDisposable + { + public SemaphoreSlim HttpRequestThrottler { get; } + public CancellationTokenSource InternalCancellationTokenSource { get; } + + public UploadResources( + SemaphoreSlim httpRequestThrottler, + CancellationTokenSource cancellationTokenSource) + { + HttpRequestThrottler = httpRequestThrottler; + InternalCancellationTokenSource = cancellationTokenSource; + } + + public void Dispose() + { + InternalCancellationTokenSource?.Dispose(); + HttpRequestThrottler?.Dispose(); + } + } + + /// + /// Discovers files to upload from the local directory and calculates total bytes. + /// + private async Task DiscoverFilesAsync(string basePath, CancellationToken cancellationToken) + { + return await Task.Run(() => + { + var filePaths = Directory.GetFiles( + basePath, + this._request.SearchPattern, + this._request.SearchOption); + + foreach (var filePath in filePaths) { - await loopThrottler.WaitAsync(cancellationToken).ConfigureAwait(continueOnCapturedContext: false); - - cancellationToken.ThrowIfCancellationRequested(); - if (internalCts.IsCancellationRequested) - { - // Operation cancelled as one of the upload requests failed with an exception, - // don't schedule any more upload tasks. - // Don't throw an OperationCanceledException here as we want to process the - // responses and throw the original exception. - break; - } - var uploadRequest = ConstructRequest(basePath, filepath, prefix); - var uploadCommand = _utility.GetUploadCommand(uploadRequest, asyncThrottler); - - var task = ExecuteCommandAsync(uploadCommand, internalCts, loopThrottler); - pendingTasks.Add(task); + _totalBytes += new FileInfo(filePath).Length; } - await WhenAllOrFirstExceptionAsync(pendingTasks, cancellationToken) - .ConfigureAwait(continueOnCapturedContext: false); + + return filePaths; + }, cancellationToken).ConfigureAwait(false); + } + + /// + /// Creates resources needed for parallel uploads with proper throttling. + /// + /// Throttling architecture: + /// - Task pool pattern (ForEachWithConcurrencyAsync): Controls concurrent file uploads + /// - HttpRequestThrottler: Controls total HTTP requests across ALL file uploads + /// + /// Example with ConcurrentServiceRequests = 10: + /// - Task pool creates max 10 concurrent file upload tasks + /// - HttpRequestThrottler = 10: All files share 10 total HTTP request slots + /// - Without HTTP throttler: 10 multipart files × 10 parts = 100 concurrent HTTP requests + /// - With HTTP throttler: Enforces 10 total concurrent HTTP requests across all files + /// + /// Special case: When using AmazonS3EncryptionClient, HTTP throttler is disabled. + /// The task pool concurrency control is sufficient since encryption uploads are serial per file. 
+ /// + private UploadResources CreateUploadResources(CancellationToken cancellationToken) + { + SemaphoreSlim httpRequestThrottler = null; + + // HTTP-level throttler: Shared across all uploads to control total HTTP concurrency + // Disabled for encryption client since each upload processes parts serially + if (this._utility.S3Client is Amazon.S3.Internal.IAmazonS3Encryption) + { + _logger.DebugFormat("UploadDirectoryCommand.CreateUploadResources: HTTP throttler disabled for encryption client. Multipart uploads will be serial per file."); } - finally - { - internalCts.Dispose(); - loopThrottler.Dispose(); - if (asyncThrottler != null) - asyncThrottler.Dispose(); + else + { + httpRequestThrottler = new SemaphoreSlim(this._config.ConcurrentServiceRequests); + _logger.DebugFormat("UploadDirectoryCommand.CreateUploadResources: Created HTTP throttler with MaxConcurrentRequests={0}", + this._config.ConcurrentServiceRequests); } + + var internalCts = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken); + + return new UploadResources(httpRequestThrottler, internalCts); + } + + /// + /// Executes parallel uploads of all files using task pool pattern. + /// Only creates as many tasks as the concurrency limit allows (not all files up front). + /// + private async Task ExecuteParallelUploadsAsync( + string[] filePaths, + string basePath, + string prefix, + UploadResources resources, + CancellationToken cancellationToken) + { + int concurrencyLevel = UploadFilesConcurrently + ? this._config.ConcurrentServiceRequests + : 1; + + _logger.DebugFormat("UploadDirectoryCommand.ExecuteParallelUploadsAsync: Starting task pool with ConcurrencyLevel={0}, TotalFiles={1}", + concurrencyLevel, filePaths.Length); + + await TaskHelpers.ForEachWithConcurrencyAsync( + filePaths, + concurrencyLevel, + async (filepath, ct) => + { + ct.ThrowIfCancellationRequested(); + + await UploadSingleFileAsync( + filepath, + basePath, + prefix, + resources.HttpRequestThrottler, + resources.InternalCancellationTokenSource) + .ConfigureAwait(false); + }, + cancellationToken) + .ConfigureAwait(false); + + _logger.DebugFormat("UploadDirectoryCommand.ExecuteParallelUploadsAsync: Task pool completed - FilesSuccessfullyUploaded={0}, FilesFailed={1}", + _numberOfFilesSuccessfullyUploaded, _errors.Count); } - private Task GetFiles(string path, string searchPattern, SearchOption searchOption, CancellationToken cancellationToken) + /// + /// Uploads a single file to S3 with failure handling. + /// + private async Task UploadSingleFileAsync( + string filepath, + string basePath, + string prefix, + SemaphoreSlim httpRequestThrottler, + CancellationTokenSource internalCts) { - return Task.Run(() => - { - var filePaths = Directory.GetFiles(path, searchPattern, searchOption); - foreach (var filePath in filePaths) - { - _totalBytes += new FileInfo(filePath).Length; - } - return filePaths; - }, cancellationToken); + if (internalCts.IsCancellationRequested) + return; + + var uploadRequest = ConstructRequest(basePath, filepath, prefix); + + // Create failure callback + Action onFailure = (ex) => + { + this._request.OnRaiseObjectUploadFailedEvent( + new ObjectUploadFailedEventArgs( + this._request, + uploadRequest, + ex)); + }; + + // Execute upload with failure policy + await _failurePolicy.ExecuteAsync( + () => ExecuteUploadCommandAsync(uploadRequest, httpRequestThrottler, internalCts.Token), + onFailure, + internalCts + ).ConfigureAwait(false); + } + + /// + /// Creates and executes the appropriate upload command for the file. 
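UploadSingleFileAsync routes every failure through the ObjectUploadFailedEvent callback before the failure policy decides whether to abort or continue. From the caller's side, opting into continue-on-failure looks roughly like the sketch below; FailurePolicy.ContinueOnFailure and the event-args property names are assumptions inferred by symmetry with the download side:

var request = new TransferUtilityUploadDirectoryRequest
{
    BucketName = "amzn-s3-demo-bucket", // hypothetical bucket
    Directory = @"C:\data\reports",
    SearchPattern = "*",
    SearchOption = System.IO.SearchOption.AllDirectories,
    UploadFilesConcurrently = true,
    FailurePolicy = FailurePolicy.ContinueOnFailure // assumed enum member
};

request.ObjectUploadFailedEvent += (sender, args) =>
{
    // Assumed to mirror ObjectDownloadFailedEventArgs: directory request, per-object request, exception.
    Console.WriteLine($"Failed to upload {args.ObjectRequest.Key}: {args.Exception.Message}");
};

await transferUtility.UploadDirectoryAsync(request); // transferUtility: an existing TransferUtility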
+ /// + private async Task ExecuteUploadCommandAsync( + TransferUtilityUploadRequest uploadRequest, + SemaphoreSlim httpRequestThrottler, + CancellationToken cancellationToken) + { + _logger.DebugFormat("UploadDirectoryCommand.ExecuteUploadCommandAsync: Starting upload command"); + + var command = _utility.GetUploadCommand(uploadRequest, httpRequestThrottler); + await command.ExecuteAsync(cancellationToken).ConfigureAwait(false); + + var uploaded = Interlocked.Increment(ref _numberOfFilesSuccessfullyUploaded); + _logger.DebugFormat("UploadDirectoryCommand.ExecuteUploadCommandAsync: Completed upload. FilesSuccessfullyUploaded={0}", uploaded); + } + + /// + /// Builds the response object based on upload results. + /// + private TransferUtilityUploadDirectoryResponse BuildResponse() + { + var response = new TransferUtilityUploadDirectoryResponse + { + ObjectsUploaded = _numberOfFilesSuccessfullyUploaded, + ObjectsFailed = _errors.Count, + Errors = _errors.ToList(), + Result = _errors.Count == 0 + ? DirectoryResult.Success + : (_numberOfFilesSuccessfullyUploaded > 0 + ? DirectoryResult.PartialSuccess + : DirectoryResult.Failure) + }; + + _logger.DebugFormat("UploadDirectoryCommand.BuildResponse: Uploaded={0}, Failed={1}, Result={2}", + response.ObjectsUploaded, response.ObjectsFailed, response.Result); + return response; } } } diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtility.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtility.cs index 472a5933ba28..d9cf6863e791 100644 --- a/sdk/src/Services/S3/Custom/Transfer/TransferUtility.cs +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtility.cs @@ -71,14 +71,7 @@ public partial class TransferUtility : ITransferUtility { "s3-object-lambda" }; - private static Logger Logger - { - get - { - - return Logger.GetLogger(typeof(ITransferUtility)); - } - } + private readonly Logger _logger = Logger.GetLogger(typeof(TransferUtility)); #region Constructors /// @@ -386,7 +379,7 @@ private static TransferUtilityUploadRequest ConstructUploadRequest(Stream stream }; } - internal BaseCommand GetUploadCommand(TransferUtilityUploadRequest request) + internal BaseCommand GetUploadCommand(TransferUtilityUploadRequest request) { validate(request); diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityAbortMultipartUploadsResponse.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityAbortMultipartUploadsResponse.cs new file mode 100644 index 000000000000..6c63c4b0a75b --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityAbortMultipartUploadsResponse.cs @@ -0,0 +1,35 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
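BuildResponse classifies the outcome exactly like its download counterpart: Success with zero errors, PartialSuccess when some files landed, Failure when none did. A consumer branching on that classification might look like this; UploadDirectoryWithResponseAsync is an assumed name for the new WithResponse surface, and only the response shape is taken from the code above:

// Sketch: interpreting the directory response produced by BuildResponse above.
var response = await transferUtility.UploadDirectoryWithResponseAsync(request); // assumed method name
switch (response.Result)
{
    case DirectoryResult.Success:
        Console.WriteLine($"All {response.ObjectsUploaded} objects uploaded.");
        break;
    case DirectoryResult.PartialSuccess:
        Console.WriteLine($"{response.ObjectsUploaded} uploaded, {response.ObjectsFailed} failed.");
        foreach (var error in response.Errors)
            Console.WriteLine(error); // each entry describes one failed object
        break;
    case DirectoryResult.Failure:
        Console.WriteLine("No objects were uploaded.");
        break;
}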
+ * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ + +using Amazon.Runtime; + +namespace Amazon.S3.Transfer +{ + /// + /// Response object for Transfer Utility abort multipart uploads operations. + /// Contains response metadata from abort multipart uploads operations. + /// + public class TransferUtilityAbortMultipartUploadsResponse + { + // Empty placeholder class - properties will be added in future iterations + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryRequest.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryRequest.cs index b0556e92487a..7185e0d0cb22 100644 --- a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryRequest.cs +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryRequest.cs @@ -29,6 +29,7 @@ using Amazon.Util; using Amazon.Runtime.Internal; using System.Globalization; +using System.Threading; namespace Amazon.S3.Transfer @@ -56,6 +57,135 @@ public class TransferUtilityDownloadDirectoryRequest private string ifMatch; private string ifNoneMatch; private ResponseHeaderOverrides responseHeaders; + private FailurePolicy failurePolicy = FailurePolicy.AbortOnFailure; + + /// + /// Gets or sets the failure policy for the download directory operation. + /// Determines whether the operation should abort or continue when a failure occurs during download. + /// The default value is . + /// + public FailurePolicy FailurePolicy + { + get { return this.failurePolicy; } + set { this.failurePolicy = value; } + } + + /// + /// Occurs when an individual object fails to download during a DownloadDirectory operation. + /// + /// + /// Subscribers will receive a instance containing + /// the original , the failed + /// , and the exception that caused the failure. + /// This event is raised on a background thread by the transfer utility. + /// + /// + /// request.ObjectDownloadFailedEvent += (sender, args) => + /// { + /// // inspect args.DirectoryRequest, args.ObjectRequest, args.Exception + /// }; + /// + public event EventHandler ObjectDownloadFailedEvent; + + /// + /// Internal helper used by the transfer implementation to raise the . + /// + /// The details of the failed object download. + internal void OnRaiseObjectDownloadFailedEvent(ObjectDownloadFailedEventArgs args) + { + ObjectDownloadFailedEvent?.Invoke(this, args); + } + + /// + /// Occurs when the download directory operation is initiated. + /// + /// + /// + /// The DownloadDirectoryInitiatedEvent is fired when the download directory operation begins. + /// The DownloadDirectoryInitiatedEventArgs contains the original request information. + /// + /// + /// Attach event handlers to this event if you are interested in receiving + /// DownloadDirectoryInitiatedEvent notifications. + /// + /// + /// + /// private void downloadStarted(object sender, DownloadDirectoryInitiatedEventArgs args) + /// { + /// Console.WriteLine("Download directory started for bucket {0}", args.Request.BucketName); + /// } + /// + public event EventHandler DownloadDirectoryInitiatedEvent; + + /// + /// Occurs when the download directory operation is completed. + /// + /// + /// + /// The DownloadDirectoryCompletedEvent is fired when the download directory operation is completed successfully. 
+ /// The DownloadDirectoryCompletedEventArgs contains a snapshot of the transfer state at completion. + /// + /// + /// Attach event handlers to this event if you are interested in receiving + /// DownloadDirectoryCompletedEvent notifications. + /// + /// + /// + /// private void downloadCompleted(object sender, DownloadDirectoryCompletedEventArgs args) + /// { + /// Console.WriteLine("Download directory completed with {0} files downloaded", args.TransferredFiles); + /// } + /// + public event EventHandler DownloadDirectoryCompletedEvent; + + /// + /// Occurs when the download directory operation fails. + /// + /// + /// + /// The DownloadDirectoryFailedEvent is fired when the download directory operation fails. + /// The DownloadDirectoryFailedEventArgs contains a snapshot of the transfer state at failure. + /// + /// + /// Attach event handlers to this event if you are interested in receiving + /// DownloadDirectoryFailedEvent notifications. + /// + /// + /// + /// private void downloadFailed(object sender, DownloadDirectoryFailedEventArgs args) + /// { + /// Console.WriteLine("Download directory failed with {0} files downloaded out of {1} total", + /// args.TransferredFiles, args.TotalFiles); + /// } + /// + public event EventHandler DownloadDirectoryFailedEvent; + + /// + /// Raises the DownloadDirectoryInitiatedEvent. + /// + /// DownloadDirectoryInitiatedEventArgs args + internal void OnRaiseDownloadDirectoryInitiatedEvent(DownloadDirectoryInitiatedEventArgs args) + { + DownloadDirectoryInitiatedEvent?.Invoke(this, args); + } + + /// + /// Raises the DownloadDirectoryCompletedEvent. + /// + /// DownloadDirectoryCompletedEventArgs args + internal void OnRaiseDownloadDirectoryCompletedEvent(DownloadDirectoryCompletedEventArgs args) + { + DownloadDirectoryCompletedEvent?.Invoke(this, args); + } + + /// + /// Raises the DownloadDirectoryFailedEvent. + /// + /// DownloadDirectoryFailedEventArgs args + internal void OnRaiseDownloadDirectoryFailedEvent(DownloadDirectoryFailedEventArgs args) + { + DownloadDirectoryFailedEvent?.Invoke(this, args); + } /// /// Gets or sets the name of the bucket. @@ -188,6 +318,7 @@ internal bool IsSetUnmodifiedSinceDate() /// Specifies if multiple files will be downloaded concurrently. /// The number of concurrent web requests used is controlled /// by the TransferUtilityConfig.ConcurrencyLevel property. + /// The default value is false. /// #if BCL || NETSTANDARD public @@ -559,4 +690,203 @@ public override string ToString() this.TotalNumberOfFiles, this.NumberOfFilesDownloaded, this.TotalBytes, this.TransferredBytes); } } + + /// + /// Provides data for + /// which is raised when an individual object fails to download during a + /// DownloadDirectory operation. + /// + /// + /// Instances of this class are created by the transfer implementation and + /// passed to event subscribers. The instance contains the original directory + /// download request (), + /// the per-object download request that failed (), + /// and the exception that caused the failure. + /// + /// + /// + /// var request = new TransferUtilityDownloadDirectoryRequest { /* ... 
*/ }; + /// request.ObjectDownloadFailedEvent += (sender, args) => + /// { + /// // args.DirectoryRequest: original directory request + /// // args.ObjectRequest: download request for the failed object + /// // args.Exception: exception thrown during the object download + /// Console.WriteLine($"Failed to download {args.ObjectRequest.Key}: {args.Exception}"); + /// }; + /// + /// + public class ObjectDownloadFailedEventArgs : EventArgs + { + /// + /// Initializes a new instance of the class. + /// + /// The original that initiated the directory download. + /// The representing the individual object download that failed. + /// The that caused the object download to fail. + internal ObjectDownloadFailedEventArgs( + TransferUtilityDownloadDirectoryRequest directoryRequest, + TransferUtilityDownloadRequest objectRequest, + Exception exception) + { + DirectoryRequest = directoryRequest; + ObjectRequest = objectRequest; + Exception = exception; + } + + /// + /// Gets the original that initiated the directory download. + /// + /// + /// The directory-level request that configured the overall DownloadDirectory operation + /// (bucket, prefix, local directory, options, etc.). + /// + public TransferUtilityDownloadDirectoryRequest DirectoryRequest { get; private set; } + + /// + /// Gets the for the individual object that failed to download. + /// + /// + /// Contains per-object parameters such as the S3 key, version id (if set), and the local file path. + /// + public TransferUtilityDownloadRequest ObjectRequest { get; private set; } + + /// + /// Gets the that caused the object download to fail. + /// + /// + /// The exception thrown by the underlying download operation. Can be an , + /// , , or other exception type depending + /// on the failure mode. + /// + public Exception Exception { get; private set; } + } + + /// + /// Provides data for + /// which is raised when a download directory operation is initiated. + /// + public class DownloadDirectoryInitiatedEventArgs : EventArgs + { + /// + /// Initializes a new instance of the DownloadDirectoryInitiatedEventArgs class. + /// + /// The transfer request + internal DownloadDirectoryInitiatedEventArgs(TransferUtilityDownloadDirectoryRequest request) + { + Request = request; + } + + /// + /// Gets the request associated with this transfer operation. + /// + public TransferUtilityDownloadDirectoryRequest Request { get; private set; } + } + + /// + /// Provides data for + /// which is raised when a download directory operation is completed successfully. + /// + public class DownloadDirectoryCompletedEventArgs : EventArgs + { + /// + /// Initializes a new instance of the DownloadDirectoryCompletedEventArgs class. + /// + /// The transfer request + /// The transfer response + /// The total number of bytes that have been transferred so far + /// The total size for all objects + /// The total number of files that have been transferred so far + /// The total number of files + internal DownloadDirectoryCompletedEventArgs(TransferUtilityDownloadDirectoryRequest request, + TransferUtilityDownloadDirectoryResponse response, long transferredBytes, long totalBytes, + long transferredFiles, long totalFiles) + { + Request = request; + Response = response; + TransferredBytes = transferredBytes; + TotalBytes = totalBytes; + TransferredFiles = transferredFiles; + TotalFiles = totalFiles; + } + + /// + /// Gets the request associated with this transfer operation. 
+ /// + public TransferUtilityDownloadDirectoryRequest Request { get; private set; } + + /// + /// Gets the response from the transfer operation. + /// + public TransferUtilityDownloadDirectoryResponse Response { get; private set; } + + /// + /// Gets the total number of bytes that have been transferred so far. + /// + public long TransferredBytes { get; private set; } + + /// + /// Gets the total size for all objects. Returns -1 if unknown. + /// + public long TotalBytes { get; private set; } + + /// + /// Gets the total number of files that have been transferred so far. + /// + public long TransferredFiles { get; private set; } + + /// + /// Gets the total number of files. Returns -1 if unknown. + /// + public long TotalFiles { get; private set; } + } + + /// + /// Provides data for + /// which is raised when a download directory operation fails. + /// + public class DownloadDirectoryFailedEventArgs : EventArgs + { + /// + /// Initializes a new instance of the DownloadDirectoryFailedEventArgs class. + /// + /// The transfer request + /// The total number of bytes that have been transferred so far + /// The total size for all objects + /// The total number of files that have been transferred so far + /// The total number of files + internal DownloadDirectoryFailedEventArgs(TransferUtilityDownloadDirectoryRequest request, + long transferredBytes, long totalBytes, long transferredFiles, long totalFiles) + { + Request = request; + TransferredBytes = transferredBytes; + TotalBytes = totalBytes; + TransferredFiles = transferredFiles; + TotalFiles = totalFiles; + } + + /// + /// Gets the request associated with this transfer operation. + /// + public TransferUtilityDownloadDirectoryRequest Request { get; private set; } + + /// + /// Gets the total number of bytes that have been transferred so far. + /// + public long TransferredBytes { get; private set; } + + /// + /// Gets the total size for all objects. Returns -1 if unknown. + /// + public long TotalBytes { get; private set; } + + /// + /// Gets the total number of files that have been transferred so far. + /// + public long TransferredFiles { get; private set; } + + /// + /// Gets the total number of files. Returns -1 if unknown. + /// + public long TotalFiles { get; private set; } + } } diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryResponse.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryResponse.cs new file mode 100644 index 000000000000..63533406b4d2 --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryResponse.cs @@ -0,0 +1,46 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +using System; +using System.Collections.Generic; + +namespace Amazon.S3.Transfer +{ + /// + /// Contains the details returned from a Transfer Utility download directory operation. + /// + public class TransferUtilityDownloadDirectoryResponse + { + /// + /// The number of objects that have been successfully downloaded. 
+        ///
+        public long ObjectsDownloaded { get; set; }
+
+        ///
+        /// The number of objects that failed to download. Zero if all succeeded.
+        ///
+        public long ObjectsFailed { get; set; }
+
+        ///
+        /// The collection of exceptions encountered when downloading individual objects.
+        ///
+        public IList<Exception> Errors { get; set; }
+
+        ///
+        /// Overall result of the directory download operation.
+        ///
+        public DirectoryResult Result { get; set; }
+    }
+}
diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadRequest.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadRequest.cs
index d9a4bc5c7119..95db655a02f7 100644
--- a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadRequest.cs
+++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadRequest.cs
@@ -90,5 +90,254 @@ internal void OnRaiseProgressEvent(WriteObjectProgressArgs progressArgs)
         {
             AWSSDKUtils.InvokeInBackground(WriteObjectProgressEvent, progressArgs, this);
         }
+
+        ///
+        /// The event for DownloadInitiatedEvent notifications. All
+        /// subscribers will be notified when a download transfer operation
+        /// starts.
+        ///
+        /// The DownloadInitiatedEvent is fired exactly once when
+        /// a download transfer operation begins. The delegates attached to the event
+        /// will be passed information about the download request and
+        /// file path, but no progress information.
+        ///
+        ///
+        ///
+        /// Subscribe to this event if you want to receive
+        /// DownloadInitiatedEvent notifications. Here is how:
+ /// 1. Define a method with a signature similar to this one: + /// + /// private void downloadStarted(object sender, DownloadInitiatedEventArgs args) + /// { + /// Console.WriteLine($"Download started: {args.FilePath}"); + /// Console.WriteLine($"Bucket: {args.Request.BucketName}"); + /// Console.WriteLine($"Key: {args.Request.Key}"); + /// } + /// + /// 2. Add this method to the DownloadInitiatedEvent delegate's invocation list + /// + /// TransferUtilityDownloadRequest request = new TransferUtilityDownloadRequest(); + /// request.DownloadInitiatedEvent += downloadStarted; + /// + ///
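+        /// As a lighter-weight alternative to a named method, an inline lambda also works.
+        /// A minimal sketch (handler signature as declared above):
+        /// <code>
+        /// request.DownloadInitiatedEvent += (sender, args) =>
+        ///     Console.WriteLine($"Starting s3://{args.Request.BucketName}/{args.Request.Key} -> {args.FilePath}");
+        /// </code>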
+ public event EventHandler DownloadInitiatedEvent; + + /// + /// The event for DownloadCompletedEvent notifications. All + /// subscribers will be notified when a download transfer operation + /// completes successfully. + /// + /// The DownloadCompletedEvent is fired exactly once when + /// a download transfer operation completes successfully. The delegates attached to the event + /// will be passed information about the completed download including + /// the final response from S3 with ETag, VersionId, and other metadata. + /// + /// + /// + /// Subscribe to this event if you want to receive + /// DownloadCompletedEvent notifications. Here is how:
+ /// 1. Define a method with a signature similar to this one: + /// + /// private void downloadCompleted(object sender, DownloadCompletedEventArgs args) + /// { + /// Console.WriteLine($"Download completed: {args.FilePath}"); + /// Console.WriteLine($"Transferred: {args.TransferredBytes} bytes"); + /// Console.WriteLine($"ETag: {args.Response.ETag}"); + /// Console.WriteLine($"S3 Key: {args.Response.Key}"); + /// Console.WriteLine($"Version ID: {args.Response.VersionId}"); + /// } + /// + /// 2. Add this method to the DownloadCompletedEvent delegate's invocation list + /// + /// TransferUtilityDownloadRequest request = new TransferUtilityDownloadRequest(); + /// request.DownloadCompletedEvent += downloadCompleted; + /// + ///
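+        /// An inline variant (illustrative sketch) that double-checks the byte counts
+        /// reported at completion:
+        /// <code>
+        /// request.DownloadCompletedEvent += (sender, args) =>
+        /// {
+        ///     if (args.TransferredBytes == args.TotalBytes)
+        ///         Console.WriteLine($"All {args.TotalBytes} bytes written to {args.FilePath}");
+        /// };
+        /// </code>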
+ public event EventHandler DownloadCompletedEvent; + + /// + /// The event for DownloadFailedEvent notifications. All + /// subscribers will be notified when a download transfer operation + /// fails. + /// + /// The DownloadFailedEvent is fired exactly once when + /// a download transfer operation fails. The delegates attached to the event + /// will be passed information about the failed download including + /// partial progress information, but no response data since the download failed. + /// + /// + /// + /// Subscribe to this event if you want to receive + /// DownloadFailedEvent notifications. Here is how:
+ /// 1. Define a method with a signature similar to this one: + /// + /// private void downloadFailed(object sender, DownloadFailedEventArgs args) + /// { + /// Console.WriteLine($"Download failed: {args.FilePath}"); + /// Console.WriteLine($"Partial progress: {args.TransferredBytes} bytes"); + /// Console.WriteLine($"Bucket: {args.Request.BucketName}"); + /// Console.WriteLine($"Key: {args.Request.Key}"); + /// } + /// + /// 2. Add this method to the DownloadFailedEvent delegate's invocation list + /// + /// TransferUtilityDownloadRequest request = new TransferUtilityDownloadRequest(); + /// request.DownloadFailedEvent += downloadFailed; + /// + ///
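+        /// One possible inline handler (a sketch only; whether a partially written file
+        /// should be kept or discarded is an application-level decision, not something
+        /// the transfer utility mandates):
+        /// <code>
+        /// request.DownloadFailedEvent += (sender, args) =>
+        /// {
+        ///     if (System.IO.File.Exists(args.FilePath))
+        ///         System.IO.File.Delete(args.FilePath); // drop the partial download
+        /// };
+        /// </code>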
+ public event EventHandler DownloadFailedEvent; + + /// + /// Causes the DownloadInitiatedEvent event to be fired. + /// + /// DownloadInitiatedEventArgs args + internal void OnRaiseTransferInitiatedEvent(DownloadInitiatedEventArgs args) + { + DownloadInitiatedEvent?.Invoke(this, args); + } + + /// + /// Causes the DownloadCompletedEvent event to be fired. + /// + /// DownloadCompletedEventArgs args + internal void OnRaiseTransferCompletedEvent(DownloadCompletedEventArgs args) + { + DownloadCompletedEvent?.Invoke(this, args); + } + + /// + /// Causes the DownloadFailedEvent event to be fired. + /// + /// DownloadFailedEventArgs args + internal void OnRaiseTransferFailedEvent(DownloadFailedEventArgs args) + { + DownloadFailedEvent?.Invoke(this, args); + } + } + + /// + /// Encapsulates the information needed when a download transfer operation is initiated. + /// Provides access to the original request without progress or total byte information. + /// + public class DownloadInitiatedEventArgs : EventArgs + { + /// + /// Initializes a new instance of the DownloadInitiatedEventArgs class. + /// + /// The original TransferUtilityDownloadRequest created by the user + /// The file being downloaded + internal DownloadInitiatedEventArgs(TransferUtilityDownloadRequest request, string filePath) + { + Request = request; + FilePath = filePath; + } + + /// + /// The original TransferUtilityDownloadRequest created by the user. + /// Contains all the download parameters and configuration. + /// + public TransferUtilityDownloadRequest Request { get; private set; } + + /// + /// Gets the file being downloaded. + /// + public string FilePath { get; private set; } + } + + /// + /// Encapsulates the information needed when a download transfer operation completes successfully. + /// Provides access to the original request, final response, and completion details. + /// + public class DownloadCompletedEventArgs : EventArgs + { + /// + /// Initializes a new instance of the DownloadCompletedEventArgs class. + /// + /// The original TransferUtilityDownloadRequest created by the user + /// The unified response from Transfer Utility + /// The file being downloaded + /// The total number of bytes transferred + /// The total number of bytes for the complete file + internal DownloadCompletedEventArgs(TransferUtilityDownloadRequest request, TransferUtilityDownloadResponse response, string filePath, long transferredBytes, long totalBytes) + { + Request = request; + Response = response; + FilePath = filePath; + TransferredBytes = transferredBytes; + TotalBytes = totalBytes; + } + + /// + /// The original TransferUtilityDownloadRequest created by the user. + /// Contains all the download parameters and configuration. + /// + public TransferUtilityDownloadRequest Request { get; private set; } + + /// + /// The unified response from Transfer Utility after successful download completion. + /// Contains mapped fields from GetObjectResponse. + /// + public TransferUtilityDownloadResponse Response { get; private set; } + + /// + /// Gets the file being downloaded. + /// + public string FilePath { get; private set; } + + /// + /// Gets the total number of bytes that were successfully transferred. + /// + public long TransferredBytes { get; private set; } + + /// + /// Gets the total number of bytes for the complete file. + /// + public long TotalBytes { get; private set; } + } + + /// + /// Encapsulates the information needed when a download transfer operation fails. 
+ /// Provides access to the original request and partial progress information. + /// + public class DownloadFailedEventArgs : EventArgs + { + /// + /// Initializes a new instance of the DownloadFailedEventArgs class. + /// + /// The original TransferUtilityDownloadRequest created by the user + /// The file being downloaded + /// The number of bytes transferred before failure + /// The total number of bytes for the complete file, or -1 if unknown + internal DownloadFailedEventArgs(TransferUtilityDownloadRequest request, string filePath, long transferredBytes, long totalBytes) + { + Request = request; + FilePath = filePath; + TransferredBytes = transferredBytes; + TotalBytes = totalBytes; + } + + /// + /// The original TransferUtilityDownloadRequest created by the user. + /// Contains all the download parameters and configuration. + /// + public TransferUtilityDownloadRequest Request { get; private set; } + + /// + /// Gets the file being downloaded. + /// + public string FilePath { get; private set; } + + /// + /// Gets the number of bytes that were transferred before the failure occurred. + /// + public long TransferredBytes { get; private set; } + + /// + /// Gets the total number of bytes for the complete file, or -1 if unknown. + /// This will be -1 for failures that occur before receiving the GetObjectResponse + /// (e.g., authentication errors, non-existent objects), and will contain the actual + /// file size for failures that occur after receiving response headers (e.g., disk full). + /// + public long TotalBytes { get; private set; } } } diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadResponse.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadResponse.cs new file mode 100644 index 000000000000..36474a64c0aa --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadResponse.cs @@ -0,0 +1,37 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ + +using System; +using System.Collections.Generic; +using Amazon.Runtime; +using Amazon.S3.Model; + +namespace Amazon.S3.Transfer +{ + /// + /// Response object for Transfer Utility download operations. + /// Contains response metadata from download operations. 
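+    /// A typical consumption sketch (the DownloadWithResponse method name comes from the
+    /// changelog entry; the async overload shape and the local variable names shown here
+    /// are assumptions for illustration):
+    /// <code>
+    /// var response = await transferUtility.DownloadWithResponseAsync(downloadRequest);
+    /// Console.WriteLine($"ETag: {response.ETag}, VersionId: {response.VersionId}");
+    /// </code>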
+ /// + public class TransferUtilityDownloadResponse : TransferUtilityGetObjectResponseBase + { + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityGetObjectResponseBase.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityGetObjectResponseBase.cs new file mode 100644 index 000000000000..431d498afe9e --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityGetObjectResponseBase.cs @@ -0,0 +1,293 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ + +using System; +using System.Collections.Generic; +using Amazon.Runtime; +using Amazon.S3.Model; + +namespace Amazon.S3.Transfer +{ + /// + /// Base response object for Transfer Utility operations that retrieve S3 object metadata. + /// Contains response metadata from S3 GetObject operations. + /// + public abstract class TransferUtilityGetObjectResponseBase + { + /// + /// Gets and sets the AcceptRanges property. + /// + public string AcceptRanges { get; set; } + + /// + /// Gets and sets the property BucketKeyEnabled. + /// + /// Indicates whether the object uses an S3 Bucket Key for server-side encryption with + /// Amazon Web Services KMS (SSE-KMS). + /// + /// + public bool? BucketKeyEnabled { get; set; } + + /// + /// The collection of headers for the response. + /// + public HeadersCollection Headers { get; set; } + + /// + /// Gets and sets the property ChecksumCRC32. + /// + /// The Base64 encoded, 32-bit CRC-32 checksum of the object. + /// + /// + public string ChecksumCRC32 { get; set; } + + /// + /// Gets and sets the property ChecksumCRC32C. + /// + /// The Base64 encoded, 32-bit CRC-32C checksum of the object. + /// + /// + public string ChecksumCRC32C { get; set; } + + /// + /// Gets and sets the property ChecksumCRC64NVME. + /// + /// The Base64 encoded, 64-bit CRC-64NVME checksum of the object. + /// + /// + public string ChecksumCRC64NVME { get; set; } + + /// + /// Gets and sets the property ChecksumSHA1. + /// + /// The Base64 encoded, 160-bit SHA-1 digest of the object. + /// + /// + public string ChecksumSHA1 { get; set; } + + /// + /// Gets and sets the property ChecksumSHA256. + /// + /// The Base64 encoded, 256-bit SHA-256 checksum of the object. + /// + /// + public string ChecksumSHA256 { get; set; } + + /// + /// Gets and sets the property ChecksumType. + /// + /// The checksum type used to calculate the object-level checksum. + /// + /// + public ChecksumType ChecksumType { get; set; } + + /// + /// Gets and sets the ContentRange property. + /// + public string ContentRange { get; set; } + + /// + /// Gets and sets the DeleteMarker property. + /// + /// Specifies whether the object retrieved was (true) or was not (false) a Delete Marker. 
+ /// + /// + public string DeleteMarker { get; set; } + + /// + /// Gets and sets the ETag property. + /// + /// An ETag is an opaque identifier assigned by a web server to a specific version of a resource found at a URL. + /// + /// + public string ETag { get; set; } + + /// + /// Gets and sets the property Expiration. + /// + /// If the object expiration is configured, this will contain the expiration date and rule ID. + /// + /// + public Expiration Expiration { get; set; } + + /// + /// Gets and sets the ExpiresString property. + /// + /// The date and time at which the object is no longer cacheable (string format). + /// + /// + public string ExpiresString { get; set; } + + /// + /// Gets and sets the property LastModified. + /// + /// Date and time when the object was last modified. + /// + /// + public DateTime? LastModified { get; set; } + + /// + /// Gets and sets the Metadata property. + /// + /// The collection of metadata for the object. + /// + /// + public MetadataCollection Metadata { get; set; } + + /// + /// Gets and sets the property MissingMeta. + /// + /// This is set to the number of metadata entries not returned in the headers that are + /// prefixed with x-amz-meta-. + /// + /// + public int? MissingMeta { get; set; } + + /// + /// Gets and sets the property ObjectLockLegalHoldStatus. + /// + /// Indicates whether this object has an active legal hold. + /// + /// + public ObjectLockLegalHoldStatus ObjectLockLegalHoldStatus { get; set; } + + /// + /// Gets and sets the property ObjectLockMode. + /// + /// The Object Lock mode that's currently in place for this object. + /// + /// + public ObjectLockMode ObjectLockMode { get; set; } + + /// + /// Gets and sets the property ObjectLockRetainUntilDate. + /// + /// The date and time when this object's Object Lock will expire. + /// + /// + public DateTime? ObjectLockRetainUntilDate { get; set; } + + /// + /// Gets and sets the PartsCount property. + /// + /// The number of parts this object has. + /// + /// + public int? PartsCount { get; set; } + + /// + /// Gets and sets the property ReplicationStatus. + /// + /// Amazon S3 can return this if your request involves a bucket that is either a source + /// or destination in a replication rule. + /// + /// + public ReplicationStatus ReplicationStatus { get; set; } + + /// + /// Gets and sets the RequestCharged property. + /// + /// If present, indicates that the requester was successfully charged for the request. + /// + /// + public RequestCharged RequestCharged { get; set; } + + /// + /// Gets and sets the RestoreExpiration property. + /// + /// RestoreExpiration will be set for objects that have been restored from Amazon Glacier. + /// It indicates for those objects how long the restored object will exist. + /// + /// + public DateTime? RestoreExpiration { get; set; } + + /// + /// + /// + public bool? RestoreInProgress { get; set; } + + /// + /// Gets and sets the ServerSideEncryptionCustomerMethod property. + /// + /// The server-side encryption algorithm to be used with the customer provided key. + /// + /// + public ServerSideEncryptionCustomerMethod ServerSideEncryptionCustomerMethod { get; set; } + + /// + /// Gets and sets the ServerSideEncryptionCustomerProvidedKeyMD5 property. + /// + /// The MD5 server-side encryption of the customer-provided encryption key. + /// + /// + public string ServerSideEncryptionCustomerProvidedKeyMD5 { get; set; } + + /// + /// Gets and sets the ServerSideEncryptionKeyManagementServiceKeyId property. 
+ /// + /// If present, indicates the ID of the KMS key that was used for object encryption. + /// + /// + public string ServerSideEncryptionKeyManagementServiceKeyId { get; set; } + + /// + /// Gets and sets the ServerSideEncryptionMethod property. + /// + /// The server-side encryption algorithm used when you store this object in Amazon S3. + /// + /// + public ServerSideEncryptionMethod ServerSideEncryptionMethod { get; set; } + + /// + /// Gets and sets the property StorageClass. + /// + /// Provides storage class information of the object. + /// + /// + public S3StorageClass StorageClass { get; set; } + + /// + /// Gets and sets the property TagCount. + /// + /// The number of tags, if any, on the object. + /// + /// + public int? TagCount { get; set; } + + /// + /// Gets and sets the property VersionId. + /// + /// Version ID of the object. + /// + /// + public string VersionId { get; set; } + + /// + /// Gets and sets the property WebsiteRedirectLocation. + /// + /// If the bucket is configured as a website, redirects requests for this object to another + /// object in the same bucket or to an external URL. + /// + /// + public string WebsiteRedirectLocation { get; set; } + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityOpenStreamRequest.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityOpenStreamRequest.cs index 98255c63625c..ceeb5fa8e4b8 100644 --- a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityOpenStreamRequest.cs +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityOpenStreamRequest.cs @@ -32,7 +32,21 @@ namespace Amazon.S3.Transfer ///
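    /// <remarks>
    /// Multipart streaming downloads buffer downloaded parts in memory ahead of the reader;
    /// the MaxInMemoryParts property below bounds that buffer. A rough sizing sketch,
    /// assuming the default 8MB part size (bucket and key are illustrative):
    /// <code>
    /// var request = new TransferUtilityOpenStreamRequest
    /// {
    ///     BucketName = "amzn-s3-demo-bucket",
    ///     Key = "large-object.bin",
    ///     MaxInMemoryParts = 128 // caps buffering at roughly 128 * 8MB = 1GB
    /// };
    /// </code>
    /// </remarks>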
public class TransferUtilityOpenStreamRequest : BaseDownloadRequest
    {
+        private int _maxInMemoryParts = 1024;
-
+        ///
+        /// Gets or sets the maximum number of parts to buffer in memory during multipart downloads.
+        /// The default value is 1024.
+        ///
+        ///
+        /// This property controls memory usage during streaming downloads. When combined with the
+        /// default part size of 8MB, the default value of 1024 parts allows up to 8GB of memory usage.
+        /// Adjust this value based on your application's memory constraints and performance requirements.
+        ///
+        public int MaxInMemoryParts
+        {
+            get { return this._maxInMemoryParts; }
+            set { this._maxInMemoryParts = value; }
+        }
    }
}
diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityOpenStreamResponse.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityOpenStreamResponse.cs
new file mode 100644
index 000000000000..df2f57bce35f
--- /dev/null
+++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityOpenStreamResponse.cs
@@ -0,0 +1,97 @@
+/*******************************************************************************
+ *  Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *  Licensed under the Apache License, Version 2.0 (the "License"). You may not use
+ *  this file except in compliance with the License. A copy of the License is located at
+ *
+ *  http://aws.amazon.com/apache2.0
+ *
+ *  or in the "license" file accompanying this file.
+ *  This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+ *  CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ *  specific language governing permissions and limitations under the License.
+ * *****************************************************************************
+ *  __  _    _  ___
+ * ( )( \/\/ )/ __)
+ * /__\ \  / \__ \
+ * (_)(_) \/\/ (___/
+ *
+ *  AWS SDK for .NET
+ *  API Version: 2006-03-01
+ *
+ */
+
+using System;
+using System.IO;
+using Amazon.Runtime;
+
+namespace Amazon.S3.Transfer
+{
+    ///
+    /// Response object for Transfer Utility open stream operations.
+    /// Contains the stream and response metadata from open stream operations.
+    ///
+    public class TransferUtilityOpenStreamResponse : TransferUtilityGetObjectResponseBase, IDisposable
+    {
+        private bool disposed;
+        private Stream responseStream;
+
+        #region Dispose Pattern
+
+        ///
+        /// Disposes of all managed and unmanaged resources.
+        ///
+        public void Dispose()
+        {
+            Dispose(true);
+            GC.SuppressFinalize(this);
+        }
+
+        ///
+        /// Releases the unmanaged resources used by the TransferUtilityOpenStreamResponse and optionally disposes of the managed resources.
+        ///
+        /// true to release both managed and unmanaged resources; false to release only unmanaged resources.
+        protected virtual void Dispose(bool disposing)
+        {
+            if (!this.disposed)
+            {
+                if (disposing)
+                {
+                    // Dispose managed resources that must be explicitly released.
+                    // For this response, that is the response stream opened for
+                    // the GET Object request.
+                    if (responseStream != null)
+                    {
+                        responseStream.Dispose();
+                    }
+                }
+
+                responseStream = null;
+                disposed = true;
+            }
+        }
+
+        #endregion
+
+        ///
+        /// Gets and sets the ResponseStream property.
+        ///
+        /// An open stream that is read from to get the object data from S3. In order to
+        /// use this stream without leaking the underlying resource, please
+        /// wrap access to the stream within a using block.
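+        /// For example (a sketch: the OpenStreamWithResponse method name comes from the
+        /// changelog entry, and the async overload shape is assumed):
+        /// <code>
+        /// using (var response = await transferUtility.OpenStreamWithResponseAsync(openStreamRequest))
+        /// using (var reader = new System.IO.StreamReader(response.ResponseStream))
+        /// {
+        ///     string contents = await reader.ReadToEndAsync();
+        /// }
+        /// </code>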
+ /// + /// + public Stream ResponseStream + { + get { return this.responseStream; } + set { this.responseStream = value; } + } + + // Check to see if ResponseStream property is set + internal bool IsSetResponseStream() + { + return this.responseStream != null; + } + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadDirectoryRequest.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadDirectoryRequest.cs index cf7be9f65437..802d544ef86c 100644 --- a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadDirectoryRequest.cs +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadDirectoryRequest.cs @@ -42,6 +42,98 @@ public class TransferUtilityUploadDirectoryRequest : BaseUploadRequest string _keyPrefix; private bool _uploadFilesConcurrently = false; SearchOption _searchOption = SearchOption.TopDirectoryOnly; + private FailurePolicy failurePolicy = FailurePolicy.AbortOnFailure; + + /// + /// Gets or sets the failure policy for the upload directory operation. + /// Determines whether the operation should abort or continue when a failure occurs during upload. + /// The default value is . + /// + public FailurePolicy FailurePolicy + { + get { return this.failurePolicy; } + set { this.failurePolicy = value; } + } + + /// + /// Occurs when the upload directory operation is initiated. + /// + /// + /// This event is raised before any files are uploaded, providing information about + /// the total number of files and bytes that will be uploaded. + /// + public event EventHandler UploadDirectoryInitiatedEvent; + + /// + /// Occurs when the upload directory operation completes successfully. + /// + /// + /// This event is raised after all files have been processed (successfully or with failures), + /// providing the final response and statistics. + /// + public event EventHandler UploadDirectoryCompletedEvent; + + /// + /// Occurs when the upload directory operation fails. + /// + /// + /// This event is raised when the entire operation fails (not individual file failures). + /// Individual file failures are reported through . + /// + public event EventHandler UploadDirectoryFailedEvent; + + /// + /// Occurs when an individual object fails to upload during an UploadDirectory operation. + /// + /// + /// Subscribers will receive a instance containing + /// the original , the failed + /// , and the exception that caused the failure. + /// This event is raised on a background thread by the transfer utility. + /// + /// + /// request.ObjectUploadFailedEvent += (sender, args) => + /// { + /// // inspect args.DirectoryRequest, args.ObjectRequest, args.Exception + /// }; + /// + public event EventHandler ObjectUploadFailedEvent; + + /// + /// Internal helper used by the transfer implementation to raise the . + /// + /// The event args. + internal void OnRaiseUploadDirectoryInitiatedEvent(UploadDirectoryInitiatedEventArgs args) + { + UploadDirectoryInitiatedEvent?.Invoke(this, args); + } + + /// + /// Internal helper used by the transfer implementation to raise the . + /// + /// The event args. + internal void OnRaiseUploadDirectoryCompletedEvent(UploadDirectoryCompletedEventArgs args) + { + UploadDirectoryCompletedEvent?.Invoke(this, args); + } + + /// + /// Internal helper used by the transfer implementation to raise the . + /// + /// The event args. 
+ internal void OnRaiseUploadDirectoryFailedEvent(UploadDirectoryFailedEventArgs args) + { + UploadDirectoryFailedEvent?.Invoke(this, args); + } + + /// + /// Internal helper used by the transfer implementation to raise the . + /// + /// The details of the failed object upload. + internal void OnRaiseObjectUploadFailedEvent(ObjectUploadFailedEventArgs args) + { + ObjectUploadFailedEvent?.Invoke(this, args); + } /// /// Gets or sets the directory where files are uploaded from. @@ -382,4 +474,224 @@ public UploadDirectoryFileRequestArgs(TransferUtilityUploadRequest request) /// public TransferUtilityUploadRequest UploadRequest { get; set; } } + + /// + /// Provides data for . + /// + public class UploadDirectoryInitiatedEventArgs : EventArgs + { + /// + /// Initializes a new instance of the class. + /// + /// The upload directory request. + /// The total number of files to upload. + /// The total number of bytes to upload. + internal UploadDirectoryInitiatedEventArgs( + TransferUtilityUploadDirectoryRequest request, + long totalFiles, + long totalBytes) + { + Request = request; + TotalFiles = totalFiles; + TotalBytes = totalBytes; + } + + /// + /// Gets the upload directory request. + /// + public TransferUtilityUploadDirectoryRequest Request { get; private set; } + + /// + /// Gets the total number of files to upload. + /// + public long TotalFiles { get; private set; } + + /// + /// Gets the total number of bytes to upload. + /// + public long TotalBytes { get; private set; } + } + + /// + /// Provides data for . + /// + public class UploadDirectoryCompletedEventArgs : EventArgs + { + /// + /// Initializes a new instance of the class. + /// + /// The upload directory request. + /// The upload directory response. + /// The number of files successfully uploaded. + /// The total number of files attempted. + /// The number of bytes transferred. + /// The total number of bytes. + internal UploadDirectoryCompletedEventArgs( + TransferUtilityUploadDirectoryRequest request, + TransferUtilityUploadDirectoryResponse response, + long transferredFiles, + long totalFiles, + long transferredBytes, + long totalBytes) + { + Request = request; + Response = response; + TransferredFiles = transferredFiles; + TotalFiles = totalFiles; + TransferredBytes = transferredBytes; + TotalBytes = totalBytes; + } + + /// + /// Gets the upload directory request. + /// + public TransferUtilityUploadDirectoryRequest Request { get; private set; } + + /// + /// Gets the upload directory response. + /// + public TransferUtilityUploadDirectoryResponse Response { get; private set; } + + /// + /// Gets the number of files successfully uploaded. + /// + public long TransferredFiles { get; private set; } + + /// + /// Gets the total number of files attempted. + /// + public long TotalFiles { get; private set; } + + /// + /// Gets the number of bytes transferred. + /// + public long TransferredBytes { get; private set; } + + /// + /// Gets the total number of bytes. + /// + public long TotalBytes { get; private set; } + } + + /// + /// Provides data for . + /// + public class UploadDirectoryFailedEventArgs : EventArgs + { + /// + /// Initializes a new instance of the class. + /// + /// The upload directory request. + /// The number of files successfully uploaded before failure. + /// The total number of files attempted. + /// The number of bytes transferred before failure. + /// The total number of bytes. 
+ internal UploadDirectoryFailedEventArgs( + TransferUtilityUploadDirectoryRequest request, + long transferredFiles, + long totalFiles, + long transferredBytes, + long totalBytes) + { + Request = request; + TransferredFiles = transferredFiles; + TotalFiles = totalFiles; + TransferredBytes = transferredBytes; + TotalBytes = totalBytes; + } + + /// + /// Gets the upload directory request. + /// + public TransferUtilityUploadDirectoryRequest Request { get; private set; } + + /// + /// Gets the number of files successfully uploaded before failure. + /// + public long TransferredFiles { get; private set; } + + /// + /// Gets the total number of files attempted. + /// + public long TotalFiles { get; private set; } + + /// + /// Gets the number of bytes transferred before failure. + /// + public long TransferredBytes { get; private set; } + + /// + /// Gets the total number of bytes. + /// + public long TotalBytes { get; private set; } + } + + /// + /// Provides data for + /// which is raised when an individual object fails to upload during an + /// UploadDirectory operation. + /// + /// + /// Instances of this class are created by the transfer implementation and + /// passed to event subscribers. The instance contains the original directory + /// upload request (), + /// the per-object upload request that failed (), + /// and the exception that caused the failure. + /// + /// + /// + /// var request = new TransferUtilityUploadDirectoryRequest { /* ... */ }; + /// request.ObjectUploadFailedEvent += (sender, args) => + /// { + /// // args.DirectoryRequest: original directory request + /// // args.ObjectRequest: upload request for the failed object + /// // args.Exception: exception thrown during the object upload + /// Console.WriteLine($"Failed to upload {args.ObjectRequest.Key}: {args.Exception}"); + /// }; + /// + /// + public class ObjectUploadFailedEventArgs : EventArgs + { + /// + /// Initializes a new instance of the class. + /// + /// The original that initiated the directory upload. + /// The representing the individual object upload that failed. + /// The that caused the object upload to fail. + internal ObjectUploadFailedEventArgs( + TransferUtilityUploadDirectoryRequest directoryRequest, + TransferUtilityUploadRequest objectRequest, + Exception exception) + { + DirectoryRequest = directoryRequest; + ObjectRequest = objectRequest; + Exception = exception; + } + + /// + /// Gets the original that initiated the directory upload. + /// + /// + /// The directory-level request that configured the overall UploadDirectory operation. + /// + public TransferUtilityUploadDirectoryRequest DirectoryRequest { get; private set; } + + /// + /// Gets the for the individual object that failed to upload. + /// + /// + /// Contains per-object parameters such as the S3 key and version id (if set). + /// + public TransferUtilityUploadRequest ObjectRequest { get; private set; } + + /// + /// Gets the that caused the object upload to fail. + /// + /// + /// The exception thrown by the underlying upload operation. Can be an , + /// , , or other exception type depending + /// on the failure mode. 
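+        /// A handler might branch on the concrete exception type, for example
+        /// (illustrative sketch):
+        /// <code>
+        /// request.ObjectUploadFailedEvent += (sender, args) =>
+        /// {
+        ///     if (args.Exception is AmazonS3Exception s3Exception)
+        ///         Console.WriteLine($"S3 error {s3Exception.ErrorCode} for {args.ObjectRequest.Key}");
+        ///     else
+        ///         Console.WriteLine($"Upload of {args.ObjectRequest.Key} failed: {args.Exception.Message}");
+        /// };
+        /// </code>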
+ /// + public Exception Exception { get; private set; } + } } diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadDirectoryResponse.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadDirectoryResponse.cs new file mode 100644 index 000000000000..2c3912207060 --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadDirectoryResponse.cs @@ -0,0 +1,54 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ + +using System; +using System.Collections.Generic; + +namespace Amazon.S3.Transfer +{ + /// + /// Response object for Transfer Utility upload directory operations. + /// Contains response metadata from upload directory operations. + /// + public class TransferUtilityUploadDirectoryResponse + { + /// + /// The number of objects that have been successfully uploaded. + /// + public long ObjectsUploaded { get; set; } + + /// + /// The number of objects that failed to upload. Zero if all succeeded. + /// + public long ObjectsFailed { get; set; } + + /// + /// The collection of exceptions encountered when uploading individual objects. + /// + public IList Errors { get; set; } + + /// + /// Overall result of the directory upload operation. + /// + public DirectoryResult Result { get; set; } + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadRequest.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadRequest.cs index b21ab2ae7602..879b4395849d 100644 --- a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadRequest.cs +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadRequest.cs @@ -25,6 +25,7 @@ using System.IO; using System.Text; +using Amazon.Runtime; using Amazon.Runtime.Internal; using Amazon.S3.Model; using Amazon.Util; @@ -170,6 +171,132 @@ internal bool IsSetPartSize() /// public event EventHandler UploadProgressEvent; + /// + /// The event for UploadInitiatedEvent notifications. All + /// subscribers will be notified when a transfer operation + /// starts. + /// + /// The UploadInitiatedEvent is fired exactly once when + /// a transfer operation begins. The delegates attached to the event + /// will be passed information about the upload request and + /// total file size, but no progress information. + /// + /// + /// + /// Subscribe to this event if you want to receive + /// UploadInitiatedEvent notifications. Here is how:
+ /// 1. Define a method with a signature similar to this one: + /// + /// private void uploadStarted(object sender, UploadInitiatedEventArgs args) + /// { + /// Console.WriteLine($"Upload started: {args.FilePath}"); + /// Console.WriteLine($"Total size: {args.TotalBytes} bytes"); + /// Console.WriteLine($"Bucket: {args.Request.BucketName}"); + /// Console.WriteLine($"Key: {args.Request.Key}"); + /// } + /// + /// 2. Add this method to the UploadInitiatedEvent delegate's invocation list + /// + /// TransferUtilityUploadRequest request = new TransferUtilityUploadRequest(); + /// request.UploadInitiatedEvent += uploadStarted; + /// + ///
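+        /// An inline variant (sketch) that simply timestamps the start of the transfer
+        /// for later bookkeeping:
+        /// <code>
+        /// var startedAt = DateTime.MinValue;
+        /// request.UploadInitiatedEvent += (sender, args) => startedAt = DateTime.UtcNow;
+        /// </code>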
+ public event EventHandler UploadInitiatedEvent; + + /// + /// The event for UploadCompletedEvent notifications. All + /// subscribers will be notified when a transfer operation + /// completes successfully. + /// + /// The UploadCompletedEvent is fired exactly once when + /// a transfer operation completes successfully. The delegates attached to the event + /// will be passed information about the completed upload including + /// the final response from S3 with ETag, VersionId, and other metadata. + /// + /// + /// + /// Subscribe to this event if you want to receive + /// UploadCompletedEvent notifications. Here is how:
+ /// 1. Define a method with a signature similar to this one: + /// + /// private void uploadCompleted(object sender, UploadCompletedEventArgs args) + /// { + /// Console.WriteLine($"Upload completed: {args.FilePath}"); + /// Console.WriteLine($"Transferred: {args.TransferredBytes} bytes"); + /// Console.WriteLine($"ETag: {args.Response.ETag}"); + /// Console.WriteLine($"S3 Key: {args.Response.Key}"); + /// Console.WriteLine($"Version ID: {args.Response.VersionId}"); + /// } + /// + /// 2. Add this method to the UploadCompletedEvent delegate's invocation list + /// + /// TransferUtilityUploadRequest request = new TransferUtilityUploadRequest(); + /// request.UploadCompletedEvent += uploadCompleted; + /// + ///
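+        /// Combined with a start timestamp (see the sketch on UploadInitiatedEvent), a
+        /// completion handler can estimate throughput. A minimal sketch, assuming a
+        /// startedAt variable captured when the upload began:
+        /// <code>
+        /// request.UploadCompletedEvent += (sender, args) =>
+        /// {
+        ///     var elapsed = DateTime.UtcNow - startedAt;
+        ///     if (elapsed.TotalSeconds > 0)
+        ///         Console.WriteLine($"{args.TransferredBytes / elapsed.TotalSeconds / (1024 * 1024):F1} MiB/s");
+        /// };
+        /// </code>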
+ public event EventHandler UploadCompletedEvent; + + /// + /// The event for UploadFailedEvent notifications. All + /// subscribers will be notified when a transfer operation + /// fails. + /// + /// The UploadFailedEvent is fired exactly once when + /// a transfer operation fails. The delegates attached to the event + /// will be passed information about the failed upload including + /// partial progress information, but no response data since the upload failed. + /// + /// + /// + /// Subscribe to this event if you want to receive + /// UploadFailedEvent notifications. Here is how:
+ /// 1. Define a method with a signature similar to this one: + /// + /// private void uploadFailed(object sender, UploadFailedEventArgs args) + /// { + /// Console.WriteLine($"Upload failed: {args.FilePath}"); + /// Console.WriteLine($"Partial progress: {args.TransferredBytes} / {args.TotalBytes} bytes"); + /// var percent = (double)args.TransferredBytes / args.TotalBytes * 100; + /// Console.WriteLine($"Completion: {percent:F1}%"); + /// Console.WriteLine($"Bucket: {args.Request.BucketName}"); + /// Console.WriteLine($"Key: {args.Request.Key}"); + /// } + /// + /// 2. Add this method to the UploadFailedEvent delegate's invocation list + /// + /// TransferUtilityUploadRequest request = new TransferUtilityUploadRequest(); + /// request.UploadFailedEvent += uploadFailed; + /// + ///
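+        /// A handler can also surface the failure to the surrounding code, for example by
+        /// setting a flag that the caller checks before deciding whether to resubmit the
+        /// request (sketch; any retry policy is the caller's responsibility here):
+        /// <code>
+        /// var failed = false;
+        /// request.UploadFailedEvent += (sender, args) => failed = true;
+        /// </code>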
+ public event EventHandler UploadFailedEvent; + + /// + /// Causes the UploadInitiatedEvent event to be fired. + /// + /// UploadInitiatedEventArgs args + internal void OnRaiseTransferInitiatedEvent(UploadInitiatedEventArgs args) + { + UploadInitiatedEvent?.Invoke(this, args); + } + + /// + /// Causes the UploadCompletedEvent event to be fired. + /// + /// UploadCompletedEventArgs args + internal void OnRaiseTransferCompletedEvent(UploadCompletedEventArgs args) + { + UploadCompletedEvent?.Invoke(this, args); + } + + /// + /// Causes the UploadFailedEvent event to be fired. + /// + /// UploadFailedEventArgs args + internal void OnRaiseTransferFailedEvent(UploadFailedEventArgs args) + { + UploadFailedEvent?.Invoke(this, args); + } + /// /// Causes the UploadProgressEvent event to be fired. @@ -460,7 +587,7 @@ public class UploadProgressArgs : TransferProgressArgs /// currently transferred bytes and the /// total number of bytes to be transferred /// - /// The how many bytes were transferred since last event. + /// How many bytes were transferred since last event. /// The number of bytes transferred /// The total number of bytes to be transferred public UploadProgressArgs(long incrementTransferred, long transferred, long total) @@ -473,7 +600,7 @@ public UploadProgressArgs(long incrementTransferred, long transferred, long tota /// currently transferred bytes and the /// total number of bytes to be transferred ///
-        /// The how many bytes were transferred since last event.
+        /// How many bytes were transferred since last event.
         /// The number of bytes transferred
         /// The total number of bytes to be transferred
         /// The file being uploaded
@@ -487,7 +614,7 @@ public UploadProgressArgs(long incrementTransferred, long transferred, long tota
         /// currently transferred bytes and the
         /// total number of bytes to be transferred
         ///
-        /// The how many bytes were transferred since last event.
+        /// How many bytes were transferred since last event.
         /// The number of bytes transferred
         /// The total number of bytes to be transferred
         /// A compensation for any upstream aggregators to correct their totalTransferred count,
@@ -500,11 +627,164 @@ internal UploadProgressArgs(long incrementTransferred, long transferred, long to
             this.CompensationForRetry = compensationForRetry;
         }
 
+        ///
+        /// Constructor for upload progress notifications that include the originating request
+        ///
+        /// How many bytes were transferred since last event.
+        /// The number of bytes transferred
+        /// The total number of bytes to be transferred
+        /// A compensation for any upstream aggregators to correct their totalTransferred count,
+        /// in case the underlying request is retried.
+        /// The file being uploaded
+        /// The original TransferUtilityUploadRequest created by the user
+        internal UploadProgressArgs(long incrementTransferred, long transferred, long total, long compensationForRetry, string filePath, TransferUtilityUploadRequest request)
+            : base(incrementTransferred, transferred, total)
+        {
+            this.FilePath = filePath;
+            this.CompensationForRetry = compensationForRetry;
+            this.Request = request;
+        }
+
         ///
         /// Gets the FilePath.
         ///
         public string FilePath { get; private set; }
 
         internal long CompensationForRetry { get; set; }
+
+        ///
+        /// The original TransferUtilityUploadRequest created by the user.
+        ///
+        public TransferUtilityUploadRequest Request { get; internal set; }
+    }
+
+    ///
+    /// Encapsulates the information needed when a transfer operation is initiated.
+    /// Provides access to the original request and total file size without any progress information.
+    ///
+    public class UploadInitiatedEventArgs : EventArgs
+    {
+        ///
+        /// Initializes a new instance of the UploadInitiatedEventArgs class.
+        ///
+        /// The original TransferUtilityUploadRequest created by the user
+        /// The file being uploaded
+        /// The total number of bytes to be transferred
+        internal UploadInitiatedEventArgs(TransferUtilityUploadRequest request, string filePath, long totalBytes)
+        {
+            Request = request;
+            FilePath = filePath;
+            TotalBytes = totalBytes;
+        }
+
+        ///
+        /// The original TransferUtilityUploadRequest created by the user.
+        /// Contains all the upload parameters and configuration.
+        ///
+        public TransferUtilityUploadRequest Request { get; private set; }
+
+        ///
+        /// Gets the file being uploaded.
+        ///
+        public string FilePath { get; private set; }
+
+        ///
+        /// Gets the total number of bytes to be transferred.
+        ///
+        public long TotalBytes { get; private set; }
+    }
+
+    ///
+    /// Encapsulates the information needed when a transfer operation completes successfully.
+    /// Provides access to the original request, final response, and completion details.
+    ///
+    public class UploadCompletedEventArgs : EventArgs
+    {
+        ///
+        /// Initializes a new instance of the UploadCompletedEventArgs class.
+ /// + /// The original TransferUtilityUploadRequest created by the user + /// The unified response from Transfer Utility + /// The file being uploaded + /// The total number of bytes transferred + /// The total number of bytes that were transferred (should equal transferredBytes for successful uploads). + internal UploadCompletedEventArgs(TransferUtilityUploadRequest request, TransferUtilityUploadResponse response, string filePath, long transferredBytes, long totalBytes) + { + Request = request; + Response = response; + FilePath = filePath; + TransferredBytes = transferredBytes; + TotalBytes = totalBytes; + } + + /// + /// The original TransferUtilityUploadRequest created by the user. + /// Contains all the upload parameters and configuration. + /// + public TransferUtilityUploadRequest Request { get; private set; } + + /// + /// The unified response from Transfer Utility after successful upload completion. + /// Contains mapped fields from either PutObjectResponse (simple uploads) or CompleteMultipartUploadResponse (multipart uploads). + /// + public TransferUtilityUploadResponse Response { get; private set; } + + /// + /// Gets the file being uploaded. + /// + public string FilePath { get; private set; } + + /// + /// Gets the total number of bytes that were successfully transferred. + /// + public long TransferredBytes { get; private set; } + + /// + /// Gets the total number of bytes that were transferred (should equal TransferredBytes for successful uploads). + /// + public long TotalBytes { get; private set; } + } + + /// + /// Encapsulates the information needed when a transfer operation fails. + /// Provides access to the original request and partial progress information. + /// + public class UploadFailedEventArgs : EventArgs + { + /// + /// Initializes a new instance of the UploadFailedEventArgs class. + /// + /// The original TransferUtilityUploadRequest created by the user + /// The file being uploaded + /// The number of bytes transferred before failure + /// The total number of bytes that should have been transferred + internal UploadFailedEventArgs(TransferUtilityUploadRequest request, string filePath, long transferredBytes, long totalBytes) + { + Request = request; + FilePath = filePath; + TransferredBytes = transferredBytes; + TotalBytes = totalBytes; + } + + /// + /// The original TransferUtilityUploadRequest created by the user. + /// Contains all the upload parameters and configuration. + /// + public TransferUtilityUploadRequest Request { get; private set; } + + /// + /// Gets the file being uploaded. + /// + public string FilePath { get; private set; } + + /// + /// Gets the number of bytes that were transferred before the failure occurred. + /// + public long TransferredBytes { get; private set; } + + /// + /// Gets the total number of bytes that should have been transferred. + /// + public long TotalBytes { get; private set; } } } diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadResponse.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadResponse.cs new file mode 100644 index 000000000000..e7361bfd629f --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadResponse.cs @@ -0,0 +1,578 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. 
A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ + +using System; +using Amazon.Runtime; +using Amazon.S3.Model; +using Amazon.Runtime.Internal; + +namespace Amazon.S3.Transfer +{ + /// + /// Response object for Transfer Utility upload operations. + /// Contains unified response fields from both simple uploads (PutObjectResponse) + /// and multipart uploads (CompleteMultipartUploadResponse). + /// + public class TransferUtilityUploadResponse + { + private bool? _bucketKeyEnabled; + private string _bucketName; + private string _checksumCRC32; + private string _checksumCRC32C; + private string _checksumCRC64NVME; + private string _checksumSHA1; + private string _checksumSHA256; + private ChecksumType _checksumType; + private string _etag; + private Expiration _expiration; + private string _key; + private string _location; + private RequestCharged _requestCharged; + private ServerSideEncryptionCustomerMethod _serverSideEncryptionCustomerMethod; + private long? _size; + private string _sseCustomerKeyMD5; + private string _sseKmsEncryptionContext; + private string _sseKmsKeyId; + private ServerSideEncryptionMethod _serverSideEncryption; + private string _versionId; + + /// + /// Gets and sets the property BucketKeyEnabled. + /// + /// Indicates whether the uploaded object uses an S3 Bucket Key for server-side encryption + /// with Key Management Service (KMS) keys (SSE-KMS). + /// + /// + public bool? BucketKeyEnabled + { + get { return this._bucketKeyEnabled; } + set { this._bucketKeyEnabled = value; } + } + + /// + /// Checks if BucketKeyEnabled property is set. + /// + /// true if BucketKeyEnabled property is set. + internal bool IsSetBucketKeyEnabled() + { + return this._bucketKeyEnabled.HasValue; + } + + /// + /// Gets and sets the property BucketName. + /// + /// The name of the bucket that contains the newly created object. Does not return the + /// access point ARN or access point alias if used. + /// + /// + /// + /// When using this action with an access point, you must direct requests to the access + /// point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + /// When using this action with an access point through the Amazon Web Services SDKs, + /// you provide the access point ARN in place of the bucket name. For more information + /// about access point ARNs, see Using + /// access points in the Amazon S3 User Guide. + /// + /// + /// + /// When you use this action with Amazon S3 on Outposts, you must direct requests to the + /// S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + /// When you use this action with S3 on Outposts through the Amazon Web Services SDKs, + /// you provide the Outposts access point ARN in place of the bucket name. For more information + /// about S3 on Outposts ARNs, see What + /// is S3 on Outposts? in the Amazon S3 User Guide. 
+ /// + /// + public string BucketName + { + get { return this._bucketName; } + set { this._bucketName = value; } + } + + /// + /// Checks if BucketName property is set. + /// + /// true if BucketName property is set. + internal bool IsSetBucketName() + { + return !string.IsNullOrEmpty(this._bucketName); + } + + /// + /// Gets and sets the property Key. + /// + /// The object key of the newly created object. + /// + /// + public string Key + { + get { return this._key; } + set { this._key = value; } + } + + /// + /// Checks if Key property is set. + /// + /// true if Key property is set. + internal bool IsSetKey() + { + return !string.IsNullOrEmpty(this._key); + } + + /// + /// Gets and sets the property Location. + /// + /// The URI that identifies the newly created object. + /// + /// + public string Location + { + get { return this._location; } + set { this._location = value; } + } + + /// + /// Checks if Location property is set. + /// + /// true if Location property is set. + internal bool IsSetLocation() + { + return !string.IsNullOrEmpty(this._location); + } + + /// + /// Gets and sets the property Size. + /// + /// The size of the object in bytes. This will only be present if you append to an object. + /// + /// This functionality is only supported for objects in the S3 Express One Zone storage class in directory buckets. + /// + public long? Size + { + get { return this._size; } + set { this._size = value; } + } + + /// + /// Checks if Size property is set. + /// + /// true if Size property is set. + internal bool IsSetSize() + { + return this._size.HasValue; + } + + /// + /// Gets and sets the property ChecksumCRC32. + /// + /// The Base64 encoded, 32-bit CRC-32 checksum of the object. This checksum is only present + /// if the checksum was uploaded with the object. When you use an API operation on an object that + /// was uploaded using multipart uploads, this value may not be a direct checksum value + /// of the full object. Instead, it's a calculation based on the checksum values of each + /// individual part. For more information about how checksums are calculated with multipart + /// uploads, see + /// Checking object integrity in the Amazon S3 User Guide. + /// + /// + public string ChecksumCRC32 + { + get { return this._checksumCRC32; } + set { this._checksumCRC32 = value; } + } + + /// + /// Checks if ChecksumCRC32 property is set. + /// + /// true if ChecksumCRC32 property is set. + internal bool IsSetChecksumCRC32() + { + return !string.IsNullOrEmpty(this._checksumCRC32); + } + + /// + /// Gets and sets the property ChecksumCRC32C. + /// + /// The Base64 encoded, 32-bit CRC-32C checksum of the object. This checksum is only present + /// if the checksum was uploaded with the object. When you use an API operation on an object that + /// was uploaded using multipart uploads, this value may not be a direct checksum value + /// of the full object. Instead, it's a calculation based on the checksum values of each + /// individual part. For more information about how checksums are calculated with multipart + /// uploads, see + /// Checking object integrity in the Amazon S3 User Guide. + /// + /// + public string ChecksumCRC32C + { + get { return this._checksumCRC32C; } + set { this._checksumCRC32C = value; } + } + + /// + /// Checks if ChecksumCRC32C property is set. + /// + /// true if ChecksumCRC32C property is set. + internal bool IsSetChecksumCRC32C() + { + return !string.IsNullOrEmpty(this._checksumCRC32C); + } + + /// + /// Gets and sets the property ChecksumCRC64NVME. 
+ /// + /// The Base64 encoded, 64-bit CRC-64NVME checksum of the object. This header is present + /// if it was uploaded with the CRC-64NVME checksum algorithm, or if it was uploaded + /// without a checksum (and Amazon S3 added the default checksum, CRC-64NVME, to the uploaded object). + /// For more information about how checksums are calculated with multipart + /// uploads, see + /// Checking object integrity in the Amazon S3 User Guide. + /// + /// + public string ChecksumCRC64NVME + { + get { return this._checksumCRC64NVME; } + set { this._checksumCRC64NVME = value; } + } + + /// + /// Checks if ChecksumCRC64NVME property is set. + /// + /// true if ChecksumCRC64NVME property is set. + internal bool IsSetChecksumCRC64NVME() + { + return !string.IsNullOrEmpty(this._checksumCRC64NVME); + } + + /// + /// Gets and sets the property ChecksumSHA1. + /// + /// The Base64 encoded, 160-bit SHA-1 digest of the object. This will only be present + /// if it was uploaded with the object. When you use the API operation on an object that + /// was uploaded using multipart uploads, this value may not be a direct checksum value + /// of the full object. Instead, it's a calculation based on the checksum values of each + /// individual part. For more information about how checksums are calculated with multipart + /// uploads, see + /// Checking object integrity in the Amazon S3 User Guide. + /// + /// + public string ChecksumSHA1 + { + get { return this._checksumSHA1; } + set { this._checksumSHA1 = value; } + } + + /// + /// Checks if ChecksumSHA1 property is set. + /// + /// true if ChecksumSHA1 property is set. + internal bool IsSetChecksumSHA1() + { + return !string.IsNullOrEmpty(this._checksumSHA1); + } + + /// + /// Gets and sets the property ChecksumSHA256. + /// + /// The Base64 encoded, 256-bit SHA-256 digest of the object. This will only be present + /// if it was uploaded with the object. When you use an API operation on an object that + /// was uploaded using multipart uploads, this value may not be a direct checksum value + /// of the full object. Instead, it's a calculation based on the checksum values of each + /// individual part. For more information about how checksums are calculated with multipart + /// uploads, see + /// Checking object integrity in the Amazon S3 User Guide. + /// + /// + public string ChecksumSHA256 + { + get { return this._checksumSHA256; } + set { this._checksumSHA256 = value; } + } + + /// + /// Checks if ChecksumSHA256 property is set. + /// + /// true if ChecksumSHA256 property is set. + internal bool IsSetChecksumSHA256() + { + return !string.IsNullOrEmpty(this._checksumSHA256); + } + + /// + /// Gets and sets the property ChecksumType. + /// + /// This header specifies the checksum type of the object, which determines how part-level + /// checksums are combined to create an object-level checksum for multipart objects. For + /// PutObject uploads, the checksum type is always FULL_OBJECT. You can use + /// this header as a data integrity check to verify that the checksum type that is received + /// is the same checksum that was specified. For more information, + /// see + /// Checking object integrity in the Amazon S3 User Guide. + /// + /// + public ChecksumType ChecksumType + { + get { return this._checksumType; } + set { this._checksumType = value; } + } + + /// + /// Checks if ChecksumType property is set. + /// + /// true if ChecksumType property is set. 
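Because ChecksumSHA256 is returned as a Base64 string, a caller can verify integrity end to end. A sketch, valid only when the object carries a FULL_OBJECT checksum; composite multipart checksums are checksums-of-checksums and will not match a whole-file digest:

    using System;
    using System.IO;
    using System.Security.Cryptography;

    // Compare a locally computed SHA-256 digest (Base64) against the
    // ChecksumSHA256 value carried on the response.
    static bool MatchesSha256(string localPath, string checksumSha256FromResponse)
    {
        using (var sha256 = SHA256.Create())
        using (var file = File.OpenRead(localPath))
        {
            var local = Convert.ToBase64String(sha256.ComputeHash(file));
            return string.Equals(local, checksumSha256FromResponse, StringComparison.Ordinal);
        }
    }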
+ internal bool IsSetChecksumType() + { + return this._checksumType != null; + } + + /// + /// Gets and sets the property ETag. + /// + /// Entity tag for the uploaded object. + /// + /// + /// + /// General purpose buckets - To ensure that data is not corrupted traversing + /// the network, for objects where the ETag is the MD5 digest of the object, you can calculate + /// the MD5 while putting an object to Amazon S3 and compare the returned ETag to the + /// calculated MD5 value. + /// + /// + /// + /// Directory buckets - The ETag for the object in a directory bucket isn't the + /// MD5 digest of the object. + /// + /// + public string ETag + { + get { return this._etag; } + set { this._etag = value; } + } + + /// + /// Checks if ETag property is set. + /// + /// true if ETag property is set. + internal bool IsSetETag() + { + return !string.IsNullOrEmpty(this._etag); + } + + /// + /// Gets and sets the property Expiration. + /// + /// If the object expiration is configured, this will contain the expiration date (expiry-date) + /// and rule ID (rule-id). The value of rule-id is URL encoded. + /// + /// + /// Object expiration information is not returned for directory buckets (for those, the + /// response header will contain the value "NotImplemented"). + /// + /// + public Expiration Expiration + { + get { return this._expiration; } + set { this._expiration = value; } + } + + /// + /// Checks if Expiration property is set. + /// + /// true if Expiration property is set. + internal bool IsSetExpiration() + { + return this._expiration != null; + } + + /// + /// Gets and sets the property RequestCharged. + /// + /// If present, indicates that the requester was successfully charged for the request. + /// + /// + public RequestCharged RequestCharged + { + get { return this._requestCharged; } + set { this._requestCharged = value; } + } + + /// + /// Checks if RequestCharged property is set. + /// + /// true if RequestCharged property is set. + internal bool IsSetRequestCharged() + { + return this._requestCharged != null; + } + + /// + /// The Server-side encryption algorithm to be used with the customer provided key. + /// + /// + /// This functionality is not supported for directory buckets. + /// + /// + /// + public ServerSideEncryptionCustomerMethod ServerSideEncryptionCustomerMethod + { + get { return this._serverSideEncryptionCustomerMethod; } + set { this._serverSideEncryptionCustomerMethod = value; } + } + + /// + /// Checks if ServerSideEncryptionCustomerMethod property is set. + /// + /// true if ServerSideEncryptionCustomerMethod property is set. + internal bool IsSetServerSideEncryptionCustomerMethod() + { + return this._serverSideEncryptionCustomerMethod != null; + } + + /// + /// The MD5 of the customer encryption key specified in the ServerSideEncryptionCustomerProvidedKey property. The MD5 is + /// base 64 encoded. This field is optional, the SDK will calculate the MD5 if this is not set. + /// + /// + /// This functionality is not supported for directory buckets. + /// + /// + /// + public string ServerSideEncryptionCustomerProvidedKeyMD5 + { + get { return this._sseCustomerKeyMD5; } + set { this._sseCustomerKeyMD5 = value; } + } + + /// + /// Checks if ServerSideEncryptionCustomerProvidedKeyMD5 property is set. + /// + /// true if ServerSideEncryptionCustomerProvidedKeyMD5 property is set. 
+ internal bool IsSetServerSideEncryptionCustomerProvidedKeyMD5() + { + return !string.IsNullOrEmpty(this._sseCustomerKeyMD5); + } + + /// + /// + /// If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. + /// The value of this header is a Base64 encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. + /// This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future GetObject operations on this object. + /// + /// + [AWSProperty(Sensitive=true)] + public string ServerSideEncryptionKeyManagementServiceEncryptionContext + { + get { return this._sseKmsEncryptionContext; } + set { this._sseKmsEncryptionContext = value; } + } + + /// + /// Checks if ServerSideEncryptionKeyManagementServiceEncryptionContext property is set. + /// + /// true if ServerSideEncryptionKeyManagementServiceEncryptionContext property is set. + internal bool IsSetServerSideEncryptionKeyManagementServiceEncryptionContext() + { + return !string.IsNullOrEmpty(this._sseKmsEncryptionContext); + } + + /// + /// + /// If present, indicates the ID of the KMS key that was used for object encryption. + /// + /// + [AWSProperty(Sensitive=true)] + public string ServerSideEncryptionKeyManagementServiceKeyId + { + get { return this._sseKmsKeyId; } + set { this._sseKmsKeyId = value; } + } + + /// + /// Checks if ServerSideEncryptionKeyManagementServiceKeyId property is set. + /// + /// true if ServerSideEncryptionKeyManagementServiceKeyId property is set. + internal bool IsSetServerSideEncryptionKeyManagementServiceKeyId() + { + return !string.IsNullOrEmpty(this._sseKmsKeyId); + } + + /// + /// + /// The server-side encryption algorithm used when you store this object in Amazon S3 or Amazon FSx. + /// + /// + /// + /// When accessing data stored in Amazon FSx file systems using S3 access points, the only valid server side encryption option is aws:fsx. + /// + /// + /// + public ServerSideEncryptionMethod ServerSideEncryptionMethod + { + get { return this._serverSideEncryption; } + set { this._serverSideEncryption = value; } + } + + /// + /// Checks if ServerSideEncryptionMethod property is set. + /// + /// true if ServerSideEncryptionMethod property is set. + internal bool IsSetServerSideEncryptionMethod() + { + return this._serverSideEncryption != null; + } + + /// + /// Gets and sets the property VersionId. + /// + /// Version ID of the object. + /// + /// + /// + /// If you enable versioning for a bucket, Amazon S3 automatically generates a unique + /// version ID for the object being stored. Amazon S3 returns this ID in the response. + /// When you enable versioning for a bucket, if Amazon S3 receives multiple write requests + /// for the same object simultaneously, it stores all of the objects. For more information + /// about versioning, see Adding + /// Objects to Versioning-Enabled Buckets in the Amazon S3 User Guide. For + /// information about returning the versioning state of a bucket, see GetBucketVersioning. + /// + /// + /// + /// + /// This functionality is not supported for directory buckets. + /// + /// + /// + public string VersionId + { + get { return this._versionId; } + set { this._versionId = value; } + } + + /// + /// Checks if VersionId property is set. + /// + /// true if VersionId property is set. 
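The SSE-related fields let callers confirm what encryption was actually applied. A sketch that requests SSE-KMS on upload and reads the values back; the key ARN is a placeholder, and it is assumed the transfer populates these fields from the underlying service response:

    var transferUtility = new TransferUtility(new AmazonS3Client());

    var kmsRequest = new TransferUtilityUploadRequest
    {
        BucketName = "my-bucket",
        Key = "secure/data.bin",
        FilePath = @"C:\data\data.bin",
        ServerSideEncryptionMethod = ServerSideEncryptionMethod.AWSKMS,
        // Placeholder key ARN
        ServerSideEncryptionKeyManagementServiceKeyId = "arn:aws:kms:us-east-1:111122223333:key/EXAMPLE"
    };

    var kmsResponse = await transferUtility.UploadWithResponseAsync(kmsRequest);
    Console.WriteLine(kmsResponse.ServerSideEncryptionMethod);                      // expected: aws:kms
    Console.WriteLine(kmsResponse.ServerSideEncryptionKeyManagementServiceKeyId);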
+ internal bool IsSetVersionId() + { + return !string.IsNullOrEmpty(this._versionId); + } + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/_async/ITransferUtility.async.cs b/sdk/src/Services/S3/Custom/Transfer/_async/ITransferUtility.async.cs index d67a94b00856..2cf68c4d14d4 100644 --- a/sdk/src/Services/S3/Custom/Transfer/_async/ITransferUtility.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/_async/ITransferUtility.async.cs @@ -168,6 +168,122 @@ public partial interface ITransferUtility : IDisposable /// /// The task object representing the asynchronous operation. Task UploadAsync(TransferUtilityUploadRequest request, CancellationToken cancellationToken = default(CancellationToken)); + + /// + /// Uploads the specified file and returns response metadata. + /// The object key is derived from the file's name. + /// Multiple threads are used to read the file and perform multiple uploads in parallel. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. + /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploadsAsync() to abort the incomplete multipart uploads. + /// + /// + /// + /// The file path of the file to upload. + /// + /// + /// The target Amazon S3 bucket, that is, the name of the bucket to upload the file to. + /// + /// + /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. + /// + /// The task object representing the asynchronous operation with upload response metadata. + Task UploadWithResponseAsync(string filePath, string bucketName, CancellationToken cancellationToken = default(CancellationToken)); + + /// + /// Uploads the specified file and returns response metadata. + /// Multiple threads are used to read the file and perform multiple uploads in parallel. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. + /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploadsAsync() to abort the incomplete multipart uploads. + /// + /// + /// + /// The file path of the file to upload. + /// + /// + /// The target Amazon S3 bucket, that is, the name of the bucket to upload the file to. + /// + /// + /// The key under which the Amazon S3 object is stored. + /// + /// + /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. + /// + /// The task object representing the asynchronous operation with upload response metadata. 
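A usage sketch for the file-based overloads; values are placeholders, and the return type (elided in this diff) is assumed to be a Task of the TransferUtilityUploadResponse class defined above:

    var transferUtility = new TransferUtility(new AmazonS3Client());

    var response = await transferUtility.UploadWithResponseAsync(
        @"C:\data\report.pdf", "my-bucket", "reports/report.pdf");

    Console.WriteLine($"ETag:      {response.ETag}");
    Console.WriteLine($"VersionId: {response.VersionId}");  // null unless bucket versioning is enabled
    Console.WriteLine($"Key:       {response.Key}");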
+ Task UploadWithResponseAsync(string filePath, string bucketName, string key, CancellationToken cancellationToken = default(CancellationToken)); + + /// + /// Uploads the contents of the specified stream and returns response metadata. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. + /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploadsAsync() to abort the incomplete multipart uploads. + /// + /// + /// + /// The stream to read to obtain the content to upload. + /// + /// + /// The target Amazon S3 bucket, that is, the name of the bucket to upload the stream to. + /// + /// + /// The key under which the Amazon S3 object is stored. + /// + /// + /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. + /// + /// The task object representing the asynchronous operation with upload response metadata. + Task UploadWithResponseAsync(Stream stream, string bucketName, string key, CancellationToken cancellationToken = default(CancellationToken)); + + /// + /// Uploads the file or stream specified by the request and returns response metadata. + /// To track the progress of the upload, + /// add an event listener to the request's UploadProgressEvent. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. + /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploadsAsync() to abort the incomplete multipart uploads. + /// + /// + /// + /// Contains all the parameters required to upload to Amazon S3. + /// + /// + /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. + /// + /// The task object representing the asynchronous operation with upload response metadata. + Task UploadWithResponseAsync(TransferUtilityUploadRequest request, CancellationToken cancellationToken = default(CancellationToken)); #endregion #region AbortMultipartUploads @@ -194,6 +310,13 @@ public partial interface ITransferUtility : IDisposable /// If the key is not specified in the request parameter, /// the file name will be used as the key name. /// + /// + /// + /// Note: Consider using + /// instead. The newer operation uses parallel downloads to improve performance + /// and returns response metadata. + /// + /// /// /// Contains all the parameters required to download an Amazon S3 object.
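And the stream- and request-based variants; a sketch uploading from an in-memory stream (names are placeholders, transferUtility is a TransferUtility instance):

    using System.IO;
    using System.Text;

    using (var stream = new MemoryStream(Encoding.UTF8.GetBytes("hello, world")))
    {
        var streamUpload = new TransferUtilityUploadRequest
        {
            BucketName = "my-bucket",
            Key = "greetings/hello.txt",
            InputStream = stream,
            ContentType = "text/plain"
        };

        var uploadResponse = await transferUtility.UploadWithResponseAsync(streamUpload);
        Console.WriteLine($"Uploaded {streamUpload.Key}, ETag {uploadResponse.ETag}");
    }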
/// @@ -203,6 +326,188 @@ public partial interface ITransferUtility : IDisposable /// The task object representing the asynchronous operation. Task DownloadAsync(TransferUtilityDownloadRequest request, CancellationToken cancellationToken = default(CancellationToken)); + /// + /// Downloads the content from Amazon S3 and writes it to the specified file, returning response metadata. + /// + /// + /// + /// This method uses parallel downloads to significantly improve throughput compared to + /// the standard method. + /// + /// + /// How it works: + /// + /// + /// For large objects, the download is automatically split into parts (default 8MB per part) + /// Multiple parts are downloaded concurrently using parallel requests to S3 + /// Downloaded parts are written directly to the file as they arrive + /// + /// + /// Multipart Download Strategy: + /// + /// + /// The property controls how parts are downloaded (default: MultipartDownloadType.PART): + /// + /// + /// PART (default): Uses the original part boundaries from when the object was uploaded with multipart upload. + /// This is more efficient as it aligns with S3's internal part structure, but requires that the object was uploaded using multipart upload. + /// The property is ignored in this mode. + /// RANGE: Uses range-based downloads with configurable part sizes via the property. + /// This works with any object (whether uploaded as single-part or multipart) and provides more flexibility in controlling download part sizes. + /// + /// + /// When to use PART vs RANGE: + /// + /// + /// Use PART mode (default) when you know the object was uploaded using multipart upload and want optimal performance. + /// Use RANGE mode when the object's upload method is unknown, when you need specific part sizes, or when downloading objects that were uploaded as a single part. + /// + /// + /// Configuration Options: + /// + /// + /// You can customize the download behavior using : + /// + /// + /// var config = new TransferUtilityConfig + /// { + /// // Control how many parts download in parallel (default: 10) + /// ConcurrentServiceRequests = 20 + /// }; + /// var transferUtility = new TransferUtility(s3Client, config); + /// + /// + /// Use to control parallel download threads. + /// + /// + /// + /// The file path where the downloaded content will be written. + /// + /// + /// The name of the bucket containing the Amazon S3 object to download. + /// + /// + /// The key under which the Amazon S3 object is stored. + /// + /// + /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. + /// + /// The task object representing the asynchronous operation with download response metadata. + Task DownloadWithResponseAsync(string filePath, string bucketName, string key, CancellationToken cancellationToken = default(CancellationToken)); + + /// + /// Downloads the content from Amazon S3 based on the request and returns response metadata. + /// To track the progress of the download, add an event listener to the request's WriteObjectProgressEvent. + /// + /// + /// + /// This method uses parallel downloads to significantly improve throughput compared to + /// the standard method. 
+ /// + /// + /// How it works: + /// + /// + /// For large objects, the download is automatically split into parts (default 8MB per part) + /// Multiple parts are downloaded concurrently using parallel requests to S3 + /// Downloaded parts are written directly to the file as they arrive + /// + /// + /// Multipart Download Strategy: + /// + /// + /// The property controls how parts are downloaded (default: MultipartDownloadType.PART): + /// + /// + /// PART (default): Uses the original part boundaries from when the object was uploaded with multipart upload. + /// This is more efficient as it aligns with S3's internal part structure, but requires that the object was uploaded using multipart upload. + /// The property is ignored in this mode. + /// RANGE: Uses range-based downloads with configurable part sizes via the property. + /// This works with any object (whether uploaded as single-part or multipart) and provides more flexibility in controlling download part sizes. + /// + /// + /// When to use PART vs RANGE: + /// + /// + /// Use PART mode (default) when you know the object was uploaded using multipart upload and want optimal performance. + /// Use RANGE mode when the object's upload method is unknown, when you need specific part sizes, or when downloading objects that were uploaded as a single part. + /// + /// + /// Configuration Options: + /// + /// + /// You can customize the download behavior using : + /// + /// + /// var config = new TransferUtilityConfig + /// { + /// // Control how many parts download in parallel (default: 10) + /// ConcurrentServiceRequests = 20 + /// }; + /// var transferUtility = new TransferUtility(s3Client, config); + /// + /// + /// Use to control parallel download threads. + /// + /// + /// You can also customize the part size per request using : + /// + /// + /// var request = new TransferUtilityDownloadRequest + /// { + /// BucketName = "my-bucket", + /// Key = "my-key", + /// FilePath = "local-file.txt", + /// PartSize = 16 * 1024 * 1024, // Use 16MB parts instead of default 8MB + /// MultipartDownloadType = MultipartDownloadType.RANGE // Enable RANGE mode to use custom PartSize + /// }; + /// var response = await transferUtility.DownloadWithResponseAsync(request); + /// + /// + /// + /// Contains all the parameters required to download an Amazon S3 object. + /// + /// + /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. + /// + /// The task object representing the asynchronous operation with download response metadata. + Task DownloadWithResponseAsync(TransferUtilityDownloadRequest request, CancellationToken cancellationToken = default(CancellationToken)); + + /// + /// Downloads the objects in Amazon S3 that have a key that starts with the value + /// specified by s3Directory and returns response metadata. + /// Uses enhanced multipart download with concurrent part downloads for improved performance. + /// + /// + /// The name of the bucket containing the Amazon S3 objects to download. + /// + /// + /// The directory in Amazon S3 to download. + /// + /// + /// The local directory to download the objects to. + /// + /// + /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. + /// + /// The task object representing the asynchronous operation with download response metadata. 
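For the common file-download case the simple overload suffices. The download response type is elided in this diff, so var is used and only the call shape is shown (placeholder names):

    // Parallel multipart download written directly to disk;
    // transferUtility is a TransferUtility instance.
    var downloadResponse = await transferUtility.DownloadWithResponseAsync(
        @"C:\downloads\large-object.bin", "my-bucket", "large-object.bin");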
+ Task DownloadDirectoryWithResponseAsync(string bucketName, string s3Directory, string localDirectory, CancellationToken cancellationToken = default(CancellationToken)); + + /// + /// Downloads the objects in Amazon S3 that have a key that starts with the value + /// specified by the S3Directory property and returns response metadata. + /// Uses enhanced multipart download with concurrent part downloads for improved performance. + /// + /// + /// Contains all the parameters required to download objects from Amazon S3 + /// into a local directory. + /// + /// + /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. + /// + /// The task object representing the asynchronous operation with download response metadata. + Task DownloadDirectoryWithResponseAsync(TransferUtilityDownloadDirectoryRequest request, CancellationToken cancellationToken = default(CancellationToken)); + #endregion #region OpenStream @@ -211,6 +516,13 @@ public partial interface ITransferUtility : IDisposable /// Amazon S3 bucket and key. /// The caller of this method is responsible for closing the stream. /// + /// + /// + /// Note: Consider using + /// instead. The newer operation uses parallel downloads from S3 and memory buffering to improve performance, + /// and also returns response metadata along with the stream. + /// + /// /// /// The name of the bucket. /// @@ -228,6 +540,13 @@ public partial interface ITransferUtility : IDisposable /// specified by the TransferUtilityOpenStreamRequest. /// The caller of this method is responsible for closing the stream. /// + /// + /// + /// Note: Consider using + /// instead. The newer operation uses parallel downloads from S3 and memory buffering to improve performance, + /// and also returns response metadata along with the stream. + /// + /// /// /// Contains all the parameters required for the OpenStream operation. /// @@ -237,6 +556,166 @@ public partial interface ITransferUtility : IDisposable /// The task object representing the asynchronous operation. Task OpenStreamAsync(TransferUtilityOpenStreamRequest request, CancellationToken cancellationToken = default(CancellationToken)); + /// + /// Returns a stream from which the caller can read the content from the specified + /// Amazon S3 bucket and key, along with response metadata. + /// The caller of this method is responsible for closing the stream. + /// + /// + /// + /// This method uses parallel downloads and intelligent buffering to significantly improve + /// throughput compared to the standard method. + /// + /// + /// How it works: + /// + /// + /// For large objects, the download is automatically split into parts (default 8MB per part) + /// Multiple parts are downloaded concurrently using parallel requests to S3 + /// Downloaded parts are buffered in memory and served to your application as you read from the stream + /// + /// + /// Multipart Download Strategy: + /// + /// + /// The property controls how parts are downloaded (default: MultipartDownloadType.PART): + /// + /// + /// PART (default): Uses the original part boundaries from when the object was uploaded with multipart upload. + /// This is more efficient as it aligns with S3's internal part structure, but requires that the object was uploaded using multipart upload. + /// The property is ignored in this mode. + /// RANGE: Uses range-based downloads with configurable part sizes via the property. 
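A sketch for the directory download overloads introduced just above (return type elided in this diff; names are placeholders):

    // Download s3://my-bucket/backups/2024/ into a local folder; each file
    // is fetched through the multipart download path.
    var directoryDownload = await transferUtility.DownloadDirectoryWithResponseAsync(
        "my-bucket", "backups/2024", @"C:\restore\2024");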
+ /// This works with any object (whether uploaded as single-part or multipart) and provides more flexibility in controlling download part sizes. + /// + /// + /// When to use PART vs RANGE: + /// + /// + /// Use PART mode (default) when you know the object was uploaded using multipart upload and want optimal performance. + /// Use RANGE mode when the object's upload method is unknown, when you need specific part sizes, or when downloading objects that were uploaded as a single part. + /// + /// + /// Configuration Options: + /// + /// + /// You can customize the download behavior using : + /// + /// + /// var config = new TransferUtilityConfig + /// { + /// // Control how many parts download in parallel (default: 10) + /// ConcurrentServiceRequests = 20 + /// }; + /// var transferUtility = new TransferUtility(s3Client, config); + /// + /// + /// Use to control parallel download threads. + /// Use to limit memory consumption by capping the number + /// of buffered parts in memory. + /// + /// + /// Memory Considerations: The buffering mechanism uses memory to store downloaded parts. + /// Adjust if you need to limit memory usage, + /// especially when downloading very large files or multiple files concurrently. + /// + /// + /// + /// The name of the bucket. + /// + /// + /// The object key. + /// + /// + /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. + /// + /// The task object representing the asynchronous operation with response metadata. + Task OpenStreamWithResponseAsync(string bucketName, string key, CancellationToken cancellationToken = default(CancellationToken)); + + /// + /// Returns a stream to read the contents from Amazon S3 as + /// specified by the TransferUtilityOpenStreamRequest, along with response metadata. + /// The caller of this method is responsible for closing the stream. + /// + /// + /// + /// This method uses parallel downloads and intelligent buffering to significantly improve + /// throughput compared to the standard method. + /// + /// + /// How it works: + /// + /// + /// For large objects, the download is automatically split into parts (default 8MB per part) + /// Multiple parts are downloaded concurrently using parallel requests to S3 + /// Downloaded parts are buffered in memory and served to your application as you read from the stream + /// + /// + /// Multipart Download Strategy: + /// + /// + /// The property controls how parts are downloaded (default: MultipartDownloadType.PART): + /// + /// + /// PART (default): Uses the original part boundaries from when the object was uploaded with multipart upload. + /// This is more efficient as it aligns with S3's internal part structure, but requires that the object was uploaded using multipart upload. + /// The property is ignored in this mode. + /// RANGE: Uses range-based downloads with configurable part sizes via the property. + /// This works with any object (whether uploaded as single-part or multipart) and provides more flexibility in controlling download part sizes. + /// + /// + /// When to use PART vs RANGE: + /// + /// + /// Use PART mode (default) when you know the object was uploaded using multipart upload and want optimal performance. + /// Use RANGE mode when the object's upload method is unknown, when you need specific part sizes, or when downloading objects that were uploaded as a single part. 
+ /// + /// + /// Configuration Options: + /// + /// + /// You can customize the download behavior using : + /// + /// + /// var config = new TransferUtilityConfig + /// { + /// // Control how many parts download in parallel (default: 10) + /// ConcurrentServiceRequests = 20 + /// }; + /// var transferUtility = new TransferUtility(s3Client, config); + /// + /// + /// Use to control parallel download threads. + /// Use to limit memory consumption by capping the number + /// of buffered parts in memory. + /// + /// + /// You can also customize the part size per request using : + /// + /// + /// var request = new TransferUtilityOpenStreamRequest + /// { + /// BucketName = "my-bucket", + /// Key = "my-key", + /// PartSize = 16 * 1024 * 1024, // Use 16MB parts instead of default 8MB + /// MultipartDownloadType = MultipartDownloadType.RANGE // Enable RANGE mode to use custom PartSize + /// }; + /// var response = await transferUtility.OpenStreamWithResponseAsync(request); + /// + /// + /// Memory Considerations: The buffering mechanism uses memory to store downloaded parts. + /// Adjust if you need to limit memory usage, + /// especially when downloading very large files or multiple files concurrently. + /// + /// + /// + /// Contains all the parameters required for the OpenStreamWithResponse operation. + /// + /// + /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. + /// + /// The task object representing the asynchronous operation with response metadata. + Task OpenStreamWithResponseAsync(TransferUtilityOpenStreamRequest request, CancellationToken cancellationToken = default(CancellationToken)); + #endregion } } diff --git a/sdk/src/Services/S3/Custom/Transfer/_async/TransferUtility.async.cs b/sdk/src/Services/S3/Custom/Transfer/_async/TransferUtility.async.cs index 35205ad93f3a..88d36cba3dff 100644 --- a/sdk/src/Services/S3/Custom/Transfer/_async/TransferUtility.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/_async/TransferUtility.async.cs @@ -55,120 +55,21 @@ namespace Amazon.S3.Transfer public partial class TransferUtility : ITransferUtility { #region Upload - /// - /// Uploads the specified file. - /// The object key is derived from the file's name. - /// Multiple threads are used to read the file and perform multiple uploads in parallel. - /// For large uploads, the file will be divided and uploaded in parts using - /// Amazon S3's multipart API. The parts will be reassembled as one object in - /// Amazon S3. - /// - /// - /// - /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. - /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. - /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able - /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, - /// you should manually invoke TransferUtility.AbortMultipartUploadsAsync() to abort the incomplete multipart uploads. - /// - /// - /// For nonseekable streams or streams with an unknown length, TransferUtility will use multipart upload and buffer up to a part size in memory - /// until the final part is reached and complete the upload. The buffer for the multipart upload is controlled by S3Constants.MinPartSize - /// and the default value is 5 megabytes. 
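A consumption sketch for the OpenStreamWithResponse operation documented above. The shape of the returned object is not visible in this diff, so the Stream property below is an assumption:

    // Assumption: the response exposes the readable stream via a Stream property.
    // The caller remains responsible for disposing it, as with OpenStreamAsync.
    var streamResponse = await transferUtility.OpenStreamWithResponseAsync("my-bucket", "videos/clip.mp4");
    using (var s3Stream = streamResponse.Stream)
    {
        var buffer = new byte[81920];
        int bytesRead;
        while ((bytesRead = await s3Stream.ReadAsync(buffer, 0, buffer.Length)) > 0)
        {
            // Remaining parts keep downloading in parallel in the background
            // while the application consumes buffered bytes here.
        }
    }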
You can also adjust the read buffer size(i.e.how many bytes to read before writing to the part buffer) - /// via the BufferSize property on the ClientConfig.The default value for this is 8192 bytes. - /// - /// - /// - /// The file path of the file to upload. - /// - /// - /// The target Amazon S3 bucket, that is, the name of the bucket to upload the file to. - /// - /// - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// - /// The task object representing the asynchronous operation. + /// public async Task UploadAsync(string filePath, string bucketName, CancellationToken cancellationToken = default(CancellationToken)) { var request = ConstructUploadRequest(filePath, bucketName); await UploadAsync(request, cancellationToken).ConfigureAwait(false); } - /// - /// Uploads the specified file. - /// Multiple threads are used to read the file and perform multiple uploads in parallel. - /// For large uploads, the file will be divided and uploaded in parts using - /// Amazon S3's multipart API. The parts will be reassembled as one object in - /// Amazon S3. - /// - /// - /// - /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. - /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. - /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able - /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, - /// you should manually invoke TransferUtility.AbortMultipartUploadsAsync() to abort the incomplete multipart uploads. - /// - /// - /// For nonseekable streams or streams with an unknown length, TransferUtility will use multipart upload and buffer up to a part size in memory - /// until the final part is reached and complete the upload. The buffer for the multipart upload is controlled by S3Constants.MinPartSize - /// and the default value is 5 megabytes. You can also adjust the read buffer size(i.e.how many bytes to read before writing to the part buffer) - /// via the BufferSize property on the ClientConfig.The default value for this is 8192 bytes. - /// - /// - /// - /// The file path of the file to upload. - /// - /// - /// The target Amazon S3 bucket, that is, the name of the bucket to upload the file to. - /// - /// - /// The key under which the Amazon S3 object is stored. - /// - /// - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// - /// The task object representing the asynchronous operation. + /// public async Task UploadAsync(string filePath, string bucketName, string key, CancellationToken cancellationToken = default(CancellationToken)) { var request = ConstructUploadRequest(filePath, bucketName,key); await UploadAsync(request, cancellationToken).ConfigureAwait(false); } - /// - /// Uploads the contents of the specified stream. - /// For large uploads, the file will be divided and uploaded in parts using - /// Amazon S3's multipart API. The parts will be reassembled as one object in - /// Amazon S3. - /// - /// - /// - /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. - /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. - /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able - /// to abort the multipart upload. 
In this case, in order to stop getting charged for the storage of uploaded parts, - /// you should manually invoke TransferUtility.AbortMultipartUploadsAsync() to abort the incomplete multipart uploads. - /// - /// - /// For nonseekable streams or streams with an unknown length, TransferUtility will use multipart upload and buffer up to a part size in memory - /// until the final part is reached and complete the upload. The buffer for the multipart upload is controlled by S3Constants.MinPartSize - /// and the default value is 5 megabytes. You can also adjust the read buffer size(i.e.how many bytes to read before writing to the part buffer) - /// via the BufferSize property on the ClientConfig.The default value for this is 8192 bytes. - /// - /// - /// - /// The stream to read to obtain the content to upload. - /// - /// - /// The target Amazon S3 bucket, that is, the name of the bucket to upload the stream to. - /// - /// - /// The key under which the Amazon S3 object is stored. - /// - /// - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// - /// The task object representing the asynchronous operation. + /// public async Task UploadAsync(Stream stream, string bucketName, string key, CancellationToken cancellationToken = default(CancellationToken)) { var request = ConstructUploadRequest(stream, bucketName, key); @@ -177,37 +78,7 @@ public partial class TransferUtility : ITransferUtility - /// - /// Uploads the file or stream specified by the request. - /// To track the progress of the upload, - /// add an event listener to the request's UploadProgressEvent. - /// For large uploads, the file will be divided and uploaded in parts using - /// Amazon S3's multipart API. The parts will be reassembled as one object in - /// Amazon S3. - /// - /// - /// - /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. - /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. - /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able - /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, - /// you should manually invoke TransferUtility.AbortMultipartUploadsAsync() to abort the incomplete multipart uploads. - /// - /// - /// For nonseekable streams or streams with an unknown length, TransferUtility will use multipart upload and buffer up to a part size in memory - /// until the final part is reached and complete the upload. The part size buffer for the multipart upload is controlled by the partSize - /// specified on the TransferUtilityUploadRequest, and if none is specified it defaults to S3Constants.MinPartSize (5 megabytes). - /// You can also adjust the read buffer size (i.e. how many bytes to read before adding it to the - /// part buffer) via the BufferSize property on the ClientConfig. The default value for this is 8192 bytes. - /// - /// - /// - /// Contains all the parameters required to upload to Amazon S3. - /// - /// - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// - /// The task object representing the asynchronous operation. 
+ /// public async Task UploadAsync(TransferUtilityUploadRequest request, CancellationToken cancellationToken = default(CancellationToken)) { using(CreateSpan(nameof(UploadAsync), null, Amazon.Runtime.Telemetry.Tracing.SpanKind.CLIENT)) @@ -217,22 +88,42 @@ public partial class TransferUtility : ITransferUtility await command.ExecuteAsync(cancellationToken).ConfigureAwait(false); } } + + /// + public async Task UploadWithResponseAsync(string filePath, string bucketName, CancellationToken cancellationToken = default(CancellationToken)) + { + var request = ConstructUploadRequest(filePath, bucketName); + return await UploadWithResponseAsync(request, cancellationToken).ConfigureAwait(false); + } + + /// + public async Task UploadWithResponseAsync(string filePath, string bucketName, string key, CancellationToken cancellationToken = default(CancellationToken)) + { + var request = ConstructUploadRequest(filePath, bucketName, key); + return await UploadWithResponseAsync(request, cancellationToken).ConfigureAwait(false); + } + + /// + public async Task UploadWithResponseAsync(Stream stream, string bucketName, string key, CancellationToken cancellationToken = default(CancellationToken)) + { + var request = ConstructUploadRequest(stream, bucketName, key); + return await UploadWithResponseAsync(request, cancellationToken).ConfigureAwait(false); + } + + /// + public async Task UploadWithResponseAsync(TransferUtilityUploadRequest request, CancellationToken cancellationToken = default(CancellationToken)) + { + using(CreateSpan(nameof(UploadWithResponseAsync), null, Amazon.Runtime.Telemetry.Tracing.SpanKind.CLIENT)) + { + CheckForBlockedArn(request.BucketName, "Upload"); + var command = GetUploadCommand(request, null); + return await command.ExecuteAsync(cancellationToken).ConfigureAwait(false); + } + } #endregion #region AbortMultipartUploads - /// - /// Aborts the multipart uploads that were initiated before the specified date. - /// - /// - /// The name of the bucket containing multipart uploads. - /// - /// - /// The date before which the multipart uploads were initiated. - /// - /// - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// - /// The task object representing the asynchronous operation. + /// public async Task AbortMultipartUploadsAsync(string bucketName, DateTime initiatedDate, CancellationToken cancellationToken = default(CancellationToken)) { using(CreateSpan(nameof(AbortMultipartUploadsAsync), null, Amazon.Runtime.Telemetry.Tracing.SpanKind.CLIENT)) @@ -248,16 +139,7 @@ public partial class TransferUtility : ITransferUtility } } - /// - /// Aborts the multipart uploads based on the specified request parameters. - /// - /// - /// Contains all the parameters required to abort multipart uploads. - /// - /// - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// - /// The task object representing the asynchronous operation. + /// public async Task AbortMultipartUploadsAsync(TransferUtilityAbortMultipartUploadRequest request, CancellationToken cancellationToken = default(CancellationToken)) { using(CreateSpan(nameof(AbortMultipartUploadsAsync), null, Amazon.Runtime.Telemetry.Tracing.SpanKind.CLIENT)) @@ -271,18 +153,7 @@ public partial class TransferUtility : ITransferUtility #region Download - /// - /// Downloads the content from Amazon S3 and writes it to the specified file. 
- /// If the key is not specified in the request parameter, - /// the file name will used as the key name. - /// - /// - /// Contains all the parameters required to download an Amazon S3 object. - /// - /// - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// - /// The task object representing the asynchronous operation. + /// public async Task DownloadAsync(TransferUtilityDownloadRequest request, CancellationToken cancellationToken = default(CancellationToken)) { using(CreateSpan(nameof(DownloadAsync), null, Amazon.Runtime.Telemetry.Tracing.SpanKind.CLIENT)) @@ -293,24 +164,28 @@ public partial class TransferUtility : ITransferUtility } } + /// + public async Task DownloadWithResponseAsync(string filePath, string bucketName, string key, CancellationToken cancellationToken = default(CancellationToken)) + { + var request = ConstructDownloadRequest(filePath, bucketName, key); + return await DownloadWithResponseAsync(request, cancellationToken).ConfigureAwait(false); + } + + /// + public async Task DownloadWithResponseAsync(TransferUtilityDownloadRequest request, CancellationToken cancellationToken = default(CancellationToken)) + { + using(CreateSpan(nameof(DownloadWithResponseAsync), null, Amazon.Runtime.Telemetry.Tracing.SpanKind.CLIENT)) + { + CheckForBlockedArn(request.BucketName, "Download"); + var command = new MultipartDownloadCommand(this._s3Client, request, this._config); + return await command.ExecuteAsync(cancellationToken).ConfigureAwait(false); + } + } + #endregion #region OpenStream - /// - /// Returns a stream from which the caller can read the content from the specified - /// Amazon S3 bucket and key. - /// The caller of this method is responsible for closing the stream. - /// - /// - /// The name of the bucket. - /// - /// - /// The object key. - /// - /// - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// - /// The task object representing the asynchronous operation. + /// public async Task OpenStreamAsync(string bucketName, string key, CancellationToken cancellationToken = default(CancellationToken)) { TransferUtilityOpenStreamRequest request = new TransferUtilityOpenStreamRequest() @@ -321,18 +196,7 @@ public partial class TransferUtility : ITransferUtility return await OpenStreamAsync(request, cancellationToken).ConfigureAwait(false); } - /// - /// Returns a stream to read the contents from Amazon S3 as - /// specified by the TransferUtilityOpenStreamRequest. - /// The caller of this method is responsible for closing the stream. - /// - /// - /// Contains all the parameters required for the OpenStream operation. - /// - /// - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// - /// The task object representing the asynchronous operation. 
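All of the WithResponse methods thread the CancellationToken through to the underlying command, so long transfers can be abandoned. A sketch with an arbitrary timeout; the cleanup behavior noted in the catch block is an assumption:

    using System;
    using System.Threading;

    using (var cts = new CancellationTokenSource(TimeSpan.FromMinutes(5)))
    {
        try
        {
            var result = await transferUtility.DownloadWithResponseAsync(
                @"C:\downloads\big.bin", "my-bucket", "big.bin", cts.Token);
        }
        catch (OperationCanceledException)
        {
            // A partially written file may remain on disk (assumption); clean up if needed.
        }
    }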
+ /// public async Task OpenStreamAsync(TransferUtilityOpenStreamRequest request, CancellationToken cancellationToken = default(CancellationToken)) { using(CreateSpan(nameof(OpenStreamAsync), null, Amazon.Runtime.Telemetry.Tracing.SpanKind.CLIENT)) @@ -344,9 +208,54 @@ public partial class TransferUtility : ITransferUtility } } + /// + public async Task OpenStreamWithResponseAsync(string bucketName, string key, CancellationToken cancellationToken = default(CancellationToken)) + { + TransferUtilityOpenStreamRequest request = new TransferUtilityOpenStreamRequest() + { + BucketName = bucketName, + Key = key + }; + return await OpenStreamWithResponseAsync(request, cancellationToken).ConfigureAwait(false); + } + + /// + public async Task OpenStreamWithResponseAsync(TransferUtilityOpenStreamRequest request, CancellationToken cancellationToken = default(CancellationToken)) + { + using(CreateSpan(nameof(OpenStreamWithResponseAsync), null, Amazon.Runtime.Telemetry.Tracing.SpanKind.CLIENT)) + { + CheckForBlockedArn(request.BucketName, "OpenStreamWithResponse"); + OpenStreamWithResponseCommand command = new OpenStreamWithResponseCommand(this._s3Client, request, this._config); + return await command.ExecuteAsync(cancellationToken).ConfigureAwait(continueOnCapturedContext: false); + } + } + + #endregion + + #region DownloadDirectory + + /// + public async Task DownloadDirectoryWithResponseAsync(string bucketName, string s3Directory, string localDirectory, CancellationToken cancellationToken = default(CancellationToken)) + { + var request = ConstructDownloadDirectoryRequest(bucketName, s3Directory, localDirectory); + return await DownloadDirectoryWithResponseAsync(request, cancellationToken).ConfigureAwait(false); + } + + /// + public async Task DownloadDirectoryWithResponseAsync(TransferUtilityDownloadDirectoryRequest request, CancellationToken cancellationToken = default(CancellationToken)) + { + using(CreateSpan(nameof(DownloadDirectoryWithResponseAsync), null, Amazon.Runtime.Telemetry.Tracing.SpanKind.CLIENT)) + { + CheckForBlockedArn(request.BucketName, "DownloadDirectory"); + var command = new DownloadDirectoryCommand(this._s3Client, request, this._config, true); + command.DownloadFilesConcurrently = request.DownloadFilesConcurrently; + return await command.ExecuteAsync(cancellationToken).ConfigureAwait(false); + } + } + #endregion - internal BaseCommand GetUploadCommand(TransferUtilityUploadRequest request, SemaphoreSlim asyncThrottler) + internal BaseCommand GetUploadCommand(TransferUtilityUploadRequest request, SemaphoreSlim asyncThrottler) { validate(request); if (IsMultipartUpload(request)) diff --git a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.async.cs b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.async.cs index 46eddc3d5793..a25a43c8b5a5 100644 --- a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.async.cs @@ -145,6 +145,94 @@ public partial interface ITransferUtility /// The task object representing the asynchronous operation. Task UploadDirectoryAsync(TransferUtilityUploadDirectoryRequest request, CancellationToken cancellationToken = default(CancellationToken)); + /// + /// Uploads files from a specified directory and returns response metadata. + /// The object key is derived from the file names inside the directory. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. 
The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. + /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploadsAsync() to abort the incomplete multipart uploads. + /// + /// + /// + /// The source directory, that is, the directory containing the files to upload. + /// + /// + /// The target Amazon S3 bucket, that is, the name of the bucket to upload the files to. + /// + /// + /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. + /// + /// The task object representing the asynchronous operation with upload response metadata. + Task UploadDirectoryWithResponseAsync(string directory, string bucketName, CancellationToken cancellationToken = default(CancellationToken)); + + /// + /// Uploads files from a specified directory and returns response metadata. + /// The object key is derived from the file names inside the directory. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. + /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploadsAsync() to abort the incomplete multipart uploads. + /// + /// + /// + /// The source directory, that is, the directory containing the files to upload. + /// + /// + /// The target Amazon S3 bucket, that is, the name of the bucket to upload the files to. + /// + /// + /// A pattern used to identify the files from the source directory to upload. + /// + /// + /// A search option that specifies whether to recursively search for files to upload + /// in subdirectories. + /// + /// + /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. + /// + /// The task object representing the asynchronous operation with upload response metadata. + Task UploadDirectoryWithResponseAsync(string directory, string bucketName, string searchPattern, SearchOption searchOption, CancellationToken cancellationToken = default(CancellationToken)); + + /// + /// Uploads files from a specified directory and returns response metadata. + /// The object key is derived from the file names inside the directory. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. 
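As a sketch of the async directory-upload variant documented above; the property values are illustrative, and UploadFilesConcurrently mirrors what the implementation later in this diff forwards to the command:

// Sketch only -- names are placeholders; assumes an existing TransferUtility
// instance and an async method context.
var dirUploadRequest = new TransferUtilityUploadDirectoryRequest
{
    Directory = @"C:\logs",
    BucketName = "amzn-example-bucket",
    SearchPattern = "*.log",
    SearchOption = SearchOption.AllDirectories,
    UploadFilesConcurrently = true
};
TransferUtilityUploadDirectoryResponse dirUploadResponse =
    await transferUtility.UploadDirectoryWithResponseAsync(dirUploadRequest, CancellationToken.None);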
+ /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploadsAsync() to abort the incomplete multipart uploads. + /// + /// + /// + /// The request that contains all the parameters required to upload a directory. + /// + /// + /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. + /// + /// The task object representing the asynchronous operation with upload response metadata. + Task UploadDirectoryWithResponseAsync(TransferUtilityUploadDirectoryRequest request, CancellationToken cancellationToken = default(CancellationToken)); + #endregion #region DownloadDirectory @@ -152,6 +240,13 @@ public partial interface ITransferUtility /// Downloads the objects in Amazon S3 that have a key that starts with the value /// specified by s3Directory. /// + /// + /// + /// Note: Consider using + /// instead. The newer operation uses enhanced multipart download with concurrent part downloads + /// for improved performance and returns response metadata including the total number of objects downloaded. + /// + /// /// /// The name of the bucket containing the Amazon S3 objects to download. /// @@ -172,6 +267,13 @@ public partial interface ITransferUtility /// specified by the S3Directory /// property of the passed in TransferUtilityDownloadDirectoryRequest object. /// + /// + /// + /// Note: Consider using + /// instead. The newer operation uses enhanced multipart download with concurrent part downloads + /// for improved performance and returns response metadata including the total number of objects downloaded. + /// + /// /// /// Contains all the parameters required to download objects from Amazon S3 /// into a local directory. diff --git a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.sync.cs b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.sync.cs index a492f922a7d2..6f21007f148b 100644 --- a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.sync.cs +++ b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.sync.cs @@ -110,6 +110,85 @@ public partial interface ITransferUtility /// The request that contains all the parameters required to upload a directory. /// void UploadDirectory(TransferUtilityUploadDirectoryRequest request); + + /// + /// Uploads files from a specified directory and returns response metadata. + /// The object key is derived from the file names inside the directory. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. + /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. + /// + /// + /// + /// The source directory, that is, the directory containing the files to upload. 
+ /// + /// + /// The target Amazon S3 bucket, that is, the name of the bucket to upload the files to. + /// + /// Response metadata including the number of objects uploaded and any errors encountered. + TransferUtilityUploadDirectoryResponse UploadDirectoryWithResponse(string directory, string bucketName); + + /// + /// Uploads files from a specified directory and returns response metadata. + /// The object key is derived from the file names inside the directory. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. + /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. + /// + /// + /// + /// The source directory, that is, the directory containing the files to upload. + /// + /// + /// The target Amazon S3 bucket, that is, the name of the bucket to upload the files to. + /// + /// + /// A pattern used to identify the files from the source directory to upload. + /// + /// + /// A search option that specifies whether to recursively search for files to upload + /// in subdirectories. + /// + /// Response metadata including the number of objects uploaded and any errors encountered. + TransferUtilityUploadDirectoryResponse UploadDirectoryWithResponse(string directory, string bucketName, string searchPattern, SearchOption searchOption); + + /// + /// Uploads files from a specified directory and returns response metadata. + /// The object key is derived from the file names inside the directory. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. + /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. + /// + /// + /// + /// The request that contains all the parameters required to upload a directory. + /// + /// Response metadata including the number of objects uploaded and any errors encountered. + TransferUtilityUploadDirectoryResponse UploadDirectoryWithResponse(TransferUtilityUploadDirectoryRequest request); #endregion #region Upload @@ -224,6 +303,13 @@ public partial interface ITransferUtility /// Amazon S3 bucket and key. /// The caller of this method is responsible for closing the stream. /// + /// + /// + /// Note: Consider using + /// instead. The newer operation uses parallel downloads from S3 and memory buffering to improve performance, + /// and also returns response metadata along with the stream. 
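A sketch of the synchronous counterparts declared above; the directory, bucket, and pattern are placeholders:

// Sketch only -- assumes an existing TransferUtility instance.
TransferUtilityUploadDirectoryResponse syncDirResponse =
    transferUtility.UploadDirectoryWithResponse(@"C:\logs", "amzn-example-bucket");

// Overload restricting the upload to *.log files, recursing into subdirectories.
syncDirResponse = transferUtility.UploadDirectoryWithResponse(
    @"C:\logs", "amzn-example-bucket", "*.log", SearchOption.AllDirectories);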
+ /// + /// /// /// The name of the bucket. /// @@ -240,6 +326,13 @@ public partial interface ITransferUtility /// specified by the TransferUtilityOpenStreamRequest. /// The caller of this method is responsible for closing the stream. /// + /// + /// + /// Note: Consider using + /// instead. The newer operation uses parallel downloads from S3 and memory buffering to improve performance, + /// and also returns response metadata along with the stream. + /// + /// /// /// Contains all the parameters required to open a stream to an S3 object. /// @@ -248,12 +341,177 @@ public partial interface ITransferUtility /// Stream OpenStream(TransferUtilityOpenStreamRequest request); + /// + /// Returns a stream from which the caller can read the content from the specified + /// Amazon S3 bucket and key, along with response metadata. + /// The caller of this method is responsible for closing the stream. + /// + /// + /// + /// This method uses parallel downloads and intelligent buffering to significantly improve + /// throughput compared to the standard method. + /// + /// + /// How it works: + /// + /// + /// For large objects, the download is automatically split into parts (default 8MB per part) + /// Multiple parts are downloaded concurrently using parallel requests to S3 + /// Downloaded parts are buffered in memory and served to your application as you read from the stream + /// + /// + /// Multipart Download Strategy: + /// + /// + /// The property controls how parts are downloaded (default: MultipartDownloadType.PART): + /// + /// + /// PART (default): Uses the original part boundaries from when the object was uploaded with multipart upload. + /// This is more efficient as it aligns with S3's internal part structure, but requires that the object was uploaded using multipart upload. + /// The property is ignored in this mode. + /// RANGE: Uses range-based downloads with configurable part sizes via the property. + /// This works with any object (whether uploaded as single-part or multipart) and provides more flexibility in controlling download part sizes. + /// + /// + /// When to use PART vs RANGE: + /// + /// + /// Use PART mode (default) when you know the object was uploaded using multipart upload and want optimal performance. + /// Use RANGE mode when the object's upload method is unknown, when you need specific part sizes, or when downloading objects that were uploaded as a single part. + /// + /// + /// Configuration Options: + /// + /// + /// You can customize the download behavior using : + /// + /// + /// var config = new TransferUtilityConfig + /// { + /// // Control how many parts download in parallel (default: 10) + /// ConcurrentServiceRequests = 20 + /// }; + /// var transferUtility = new TransferUtility(s3Client, config); + /// + /// + /// Use to control parallel download threads. + /// Use to limit memory consumption by capping the number + /// of buffered parts in memory. + /// + /// + /// Memory Considerations: The buffering mechanism uses memory to store downloaded parts. + /// Adjust if you need to limit memory usage, + /// especially when downloading very large files or multiple files concurrently. + /// + /// + /// + /// The name of the bucket. + /// + /// + /// The object key. + /// + /// + /// A response containing the stream and metadata from the specified Amazon S3 bucket and key. 
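Combining the configuration knobs described above into one sketch. The ConcurrentServiceRequests and MaxInMemoryParts values are illustrative, and the member exposing the stream on TransferUtilityOpenStreamResponse is an assumption, since that type is defined elsewhere in this PR:

// Sketch only.
var config = new TransferUtilityConfig
{
    ConcurrentServiceRequests = 20   // parts downloaded in parallel (default: 10)
};
var transferUtility = new TransferUtility(s3Client, config);

var openRequest = new TransferUtilityOpenStreamRequest
{
    BucketName = "amzn-example-bucket",
    Key = "videos/large.mp4",
    MaxInMemoryParts = 5             // cap buffered parts to bound memory usage
};
TransferUtilityOpenStreamResponse openResponse = transferUtility.OpenStreamWithResponse(openRequest);
using (Stream contentStream = openResponse.ResponseStream)   // hypothetical member name
{
    var buffer = new byte[81920];
    int read;
    while ((read = contentStream.Read(buffer, 0, buffer.Length)) > 0)
    {
        // Consume the bytes; remaining parts keep downloading in the background,
        // and the caller is still responsible for closing the stream.
    }
}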
+ /// + TransferUtilityOpenStreamResponse OpenStreamWithResponse(string bucketName, string key); + + /// + /// Returns a stream to read the contents from Amazon S3 as + /// specified by the TransferUtilityOpenStreamRequest, along with response metadata. + /// The caller of this method is responsible for closing the stream. + /// + /// + /// + /// This method uses parallel downloads and intelligent buffering to significantly improve + /// throughput compared to the standard method. + /// + /// + /// How it works: + /// + /// + /// For large objects, the download is automatically split into parts (default 8MB per part) + /// Multiple parts are downloaded concurrently using parallel requests to S3 + /// Downloaded parts are buffered in memory and served to your application as you read from the stream + /// + /// + /// Multipart Download Strategy: + /// + /// + /// The property controls how parts are downloaded (default: MultipartDownloadType.PART): + /// + /// + /// PART (default): Uses the original part boundaries from when the object was uploaded with multipart upload. + /// This is more efficient as it aligns with S3's internal part structure, but requires that the object was uploaded using multipart upload. + /// The property is ignored in this mode. + /// RANGE: Uses range-based downloads with configurable part sizes via the property. + /// This works with any object (whether uploaded as single-part or multipart) and provides more flexibility in controlling download part sizes. + /// + /// + /// When to use PART vs RANGE: + /// + /// + /// Use PART mode (default) when you know the object was uploaded using multipart upload and want optimal performance. + /// Use RANGE mode when the object's upload method is unknown, when you need specific part sizes, or when downloading objects that were uploaded as a single part. + /// + /// + /// Configuration Options: + /// + /// + /// You can customize the download behavior using : + /// + /// + /// var config = new TransferUtilityConfig + /// { + /// // Control how many parts download in parallel (default: 10) + /// ConcurrentServiceRequests = 20 + /// }; + /// var transferUtility = new TransferUtility(s3Client, config); + /// + /// + /// Use to control parallel download threads. + /// Use to limit memory consumption by capping the number + /// of buffered parts in memory. + /// + /// + /// You can also customize the part size per request using : + /// + /// + /// var request = new TransferUtilityOpenStreamRequest + /// { + /// BucketName = "my-bucket", + /// Key = "my-key", + /// PartSize = 16 * 1024 * 1024, // Use 16MB parts instead of default 8MB + /// MultipartDownloadType = MultipartDownloadType.RANGE // Enable RANGE mode to use custom PartSize + /// }; + /// var response = transferUtility.OpenStreamWithResponse(request); + /// + /// + /// Memory Considerations: The buffering mechanism uses memory to store downloaded parts. + /// Adjust if you need to limit memory usage, + /// especially when downloading very large files or multiple files concurrently. + /// + /// + /// + /// Contains all the parameters required for the OpenStreamWithResponse operation. + /// + /// + /// A response containing the stream and metadata from Amazon S3. + /// + TransferUtilityOpenStreamResponse OpenStreamWithResponse(TransferUtilityOpenStreamRequest request); + #endregion #region Download /// /// Downloads the content from Amazon S3 and writes it to the specified file. /// + /// + /// + /// Note: Consider using + /// instead. 
The newer operation uses parallel downloads to improve performance + /// and returns response metadata. + /// + /// /// /// The file path where the content from Amazon S3 will be written to. /// @@ -270,10 +528,158 @@ public partial interface ITransferUtility /// If the key is not specified in the request parameter, /// the file name will used as the key name. /// + /// + /// + /// Note: Consider using + /// instead. The newer operation uses parallel downloads to improve performance + /// and returns response metadata. + /// + /// /// /// Contains all the parameters required to download an Amazon S3 object. /// void Download(TransferUtilityDownloadRequest request); + + /// + /// Downloads the content from Amazon S3 and writes it to the specified file, returning response metadata. + /// + /// + /// + /// This method uses parallel downloads to significantly improve throughput compared to + /// the standard method. + /// + /// + /// How it works: + /// + /// + /// For large objects, the download is automatically split into parts (default 8MB per part) + /// Multiple parts are downloaded concurrently using parallel requests to S3 + /// Downloaded parts are written directly to the file as they arrive + /// + /// + /// Multipart Download Strategy: + /// + /// + /// The property controls how parts are downloaded (default: MultipartDownloadType.PART): + /// + /// + /// PART (default): Uses the original part boundaries from when the object was uploaded with multipart upload. + /// This is more efficient as it aligns with S3's internal part structure, but requires that the object was uploaded using multipart upload. + /// The property is ignored in this mode. + /// RANGE: Uses range-based downloads with configurable part sizes via the property. + /// This works with any object (whether uploaded as single-part or multipart) and provides more flexibility in controlling download part sizes. + /// + /// + /// When to use PART vs RANGE: + /// + /// + /// Use PART mode (default) when you know the object was uploaded using multipart upload and want optimal performance. + /// Use RANGE mode when the object's upload method is unknown, when you need specific part sizes, or when downloading objects that were uploaded as a single part. + /// + /// + /// Configuration Options: + /// + /// + /// You can customize the download behavior using : + /// + /// + /// var config = new TransferUtilityConfig + /// { + /// // Control how many parts download in parallel (default: 10) + /// ConcurrentServiceRequests = 20 + /// }; + /// var transferUtility = new TransferUtility(s3Client, config); + /// + /// + /// Use to control parallel download threads. + /// + /// + /// + /// The file path where the downloaded content will be written. + /// + /// + /// The name of the bucket containing the Amazon S3 object to download. + /// + /// + /// The key under which the Amazon S3 object is stored. + /// + /// Response metadata including headers and version information from the download. + TransferUtilityDownloadResponse DownloadWithResponse(string filePath, string bucketName, string key); + + /// + /// Downloads the content from Amazon S3 based on the request and returns response metadata. + /// To track the progress of the download, add an event listener to the request's WriteObjectProgressEvent. + /// + /// + /// + /// This method uses parallel downloads to significantly improve throughput compared to + /// the standard method. 
+ /// + /// + /// How it works: + /// + /// + /// For large objects, the download is automatically split into parts (default 8MB per part) + /// Multiple parts are downloaded concurrently using parallel requests to S3 + /// Downloaded parts are written directly to the file as they arrive + /// + /// + /// Multipart Download Strategy: + /// + /// + /// The property controls how parts are downloaded (default: MultipartDownloadType.PART): + /// + /// + /// PART (default): Uses the original part boundaries from when the object was uploaded with multipart upload. + /// This is more efficient as it aligns with S3's internal part structure, but requires that the object was uploaded using multipart upload. + /// The property is ignored in this mode. + /// RANGE: Uses range-based downloads with configurable part sizes via the property. + /// This works with any object (whether uploaded as single-part or multipart) and provides more flexibility in controlling download part sizes. + /// + /// + /// When to use PART vs RANGE: + /// + /// + /// Use PART mode (default) when you know the object was uploaded using multipart upload and want optimal performance. + /// Use RANGE mode when the object's upload method is unknown, when you need specific part sizes, or when downloading objects that were uploaded as a single part. + /// + /// + /// Configuration Options: + /// + /// + /// You can customize the download behavior using : + /// + /// + /// var config = new TransferUtilityConfig + /// { + /// // Control how many parts download in parallel (default: 10) + /// ConcurrentServiceRequests = 20 + /// }; + /// var transferUtility = new TransferUtility(s3Client, config); + /// + /// + /// Use to control parallel download threads. + /// + /// + /// You can also customize the part size per request using : + /// + /// + /// var request = new TransferUtilityDownloadRequest + /// { + /// BucketName = "my-bucket", + /// Key = "my-key", + /// FilePath = "local-file.txt", + /// PartSize = 16 * 1024 * 1024, // Use 16MB parts instead of default 8MB + /// MultipartDownloadType = MultipartDownloadType.RANGE // Enable RANGE mode to use custom PartSize + /// }; + /// var response = transferUtility.DownloadWithResponse(request); + /// + /// + /// + /// Contains all the parameters required to download an Amazon S3 object. + /// + /// Response metadata including headers and version information from the download. + TransferUtilityDownloadResponse DownloadWithResponse(TransferUtilityDownloadRequest request); #endregion #region DownloadDirectory @@ -281,6 +687,13 @@ public partial interface ITransferUtility /// Downloads the objects in Amazon S3 that have a key that starts with the value /// specified by s3Directory. /// + /// + /// + /// Note: Consider using + /// instead. The newer operation uses enhanced multipart download with concurrent part downloads + /// for improved performance and returns response metadata including the total number of objects downloaded. + /// + /// /// /// The name of the bucket containing the Amazon S3 objects to download. /// @@ -297,11 +710,48 @@ public partial interface ITransferUtility /// specified by the S3Directory /// property of the passed in TransferUtilityDownloadDirectoryRequest object. /// + /// + /// + /// Note: Consider using + /// instead. The newer operation uses enhanced multipart download with concurrent part downloads + /// for improved performance and returns response metadata including the total number of objects downloaded. 
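Putting the doc-comment fragments above together, a sketch of a RANGE-mode file download with custom parallelism (all values illustrative):

// Sketch only.
var config = new TransferUtilityConfig
{
    ConcurrentServiceRequests = 20            // parallel part downloads (default: 10)
};
var transferUtility = new TransferUtility(s3Client, config);

var rangeDownloadRequest = new TransferUtilityDownloadRequest
{
    BucketName = "amzn-example-bucket",
    Key = "data/archive.bin",
    FilePath = @"C:\restore\archive.bin",
    PartSize = 16 * 1024 * 1024,                          // 16MB parts instead of the default 8MB
    MultipartDownloadType = MultipartDownloadType.RANGE   // RANGE mode honors PartSize
};
TransferUtilityDownloadResponse rangeDownloadResponse =
    transferUtility.DownloadWithResponse(rangeDownloadRequest);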
+ /// + /// /// /// Contains all the parameters required to download objects from Amazon S3 /// into a local directory. /// void DownloadDirectory(TransferUtilityDownloadDirectoryRequest request); + + /// + /// Downloads the objects in Amazon S3 that have a key that starts with the value + /// specified by s3Directory, returning response metadata. + /// Uses enhanced multipart download with concurrent part downloads for improved performance. + /// + /// + /// The name of the bucket containing the Amazon S3 objects to download. + /// + /// + /// The directory in Amazon S3 to download. + /// + /// + /// The local directory to download the objects to. + /// + /// Response metadata including the number of objects downloaded. + TransferUtilityDownloadDirectoryResponse DownloadDirectoryWithResponse(string bucketName, string s3Directory, string localDirectory); + + /// + /// Downloads the objects in Amazon S3 that have a key that starts with the value + /// specified by the S3Directory property of the passed in + /// TransferUtilityDownloadDirectoryRequest object, returning response metadata. + /// Uses enhanced multipart download with concurrent part downloads for improved performance. + /// + /// + /// Contains all the parameters required to download objects from Amazon S3 + /// into a local directory. + /// + /// Response metadata including the number of objects downloaded. + TransferUtilityDownloadDirectoryResponse DownloadDirectoryWithResponse(TransferUtilityDownloadDirectoryRequest request); #endregion #region AbortMultipartUploads @@ -318,5 +768,113 @@ public partial interface ITransferUtility void AbortMultipartUploads(string bucketName, DateTime initiatedDate); #endregion + + #region UploadWithResponse + + /// + /// Uploads the specified file and returns response metadata. + /// The object key is derived from the file's name. + /// Multiple threads are used to read the file and perform multiple uploads in parallel. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. + /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. + /// + /// + /// + /// The file path of the file to upload. + /// + /// + /// The target Amazon S3 bucket, that is, the name of the bucket to upload the file to. + /// + /// The upload response metadata. + TransferUtilityUploadResponse UploadWithResponse(string filePath, string bucketName); + + /// + /// Uploads the specified file and returns response metadata. + /// Multiple threads are used to read the file and perform multiple uploads in parallel. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. 
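For the directory-download variants above, a short sketch; the bucket, prefix, and local path are placeholders, and DownloadFilesConcurrently mirrors the async implementation earlier in this diff:

// Sketch only -- assumes an existing TransferUtility instance.
TransferUtilityDownloadDirectoryResponse dirDownloadResponse =
    transferUtility.DownloadDirectoryWithResponse("amzn-example-bucket", "backups/2024/", @"C:\restore");

// Request-based overload when per-request options are needed.
var dirDownloadRequest = new TransferUtilityDownloadDirectoryRequest
{
    BucketName = "amzn-example-bucket",
    S3Directory = "backups/2024/",
    LocalDirectory = @"C:\restore",
    DownloadFilesConcurrently = true
};
dirDownloadResponse = transferUtility.DownloadDirectoryWithResponse(dirDownloadRequest);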
+ /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. + /// + /// + /// + /// The file path of the file to upload. + /// + /// + /// The target Amazon S3 bucket, that is, the name of the bucket to upload the file to. + /// + /// + /// The key under which the Amazon S3 object is stored. + /// + /// The upload response metadata. + TransferUtilityUploadResponse UploadWithResponse(string filePath, string bucketName, string key); + + /// + /// Uploads the contents of the specified stream and returns response metadata. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. + /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. + /// + /// + /// + /// The stream to read to obtain the content to upload. + /// + /// + /// The target Amazon S3 bucket, that is, the name of the bucket to upload the stream to. + /// + /// + /// The key under which the Amazon S3 object is stored. + /// + /// The upload response metadata. + TransferUtilityUploadResponse UploadWithResponse(Stream stream, string bucketName, string key); + + /// + /// Uploads the file or stream specified by the request and returns response metadata. + /// To track the progress of the upload, + /// add an event listener to the request's UploadProgressEvent. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. + /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. + /// + /// + /// + /// Contains all the parameters required to upload to Amazon S3. + /// + /// The upload response metadata. 
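A sketch covering the UploadWithResponse overloads above (file, stream, and request forms); all identifiers are placeholders:

// Sketch only -- assumes an existing TransferUtility instance.
TransferUtilityUploadResponse uploadResponse =
    transferUtility.UploadWithResponse(@"C:\data\photo.jpg", "amzn-example-bucket", "images/photo.jpg");

// Stream overload: the stream is read to obtain the content to upload.
using (var fileStream = File.OpenRead(@"C:\data\photo.jpg"))
{
    uploadResponse = transferUtility.UploadWithResponse(fileStream, "amzn-example-bucket", "images/photo.jpg");
}

// Request overload, e.g. to track progress via UploadProgressEvent as described above.
var uploadRequest = new TransferUtilityUploadRequest
{
    BucketName = "amzn-example-bucket",
    Key = "images/photo.jpg",
    FilePath = @"C:\data\photo.jpg"
};
uploadRequest.UploadProgressEvent += (sender, args) =>
    Console.WriteLine($"{args.TransferredBytes}/{args.TotalBytes} bytes");
uploadResponse = transferUtility.UploadWithResponse(uploadRequest);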
+ TransferUtilityUploadResponse UploadWithResponse(TransferUtilityUploadRequest request); + + #endregion } } diff --git a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.async.cs b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.async.cs index 81d2d4b43351..6cda6c5c6194 100644 --- a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.async.cs @@ -54,103 +54,21 @@ public partial class TransferUtility : ITransferUtility { #region UploadDirectory - /// - /// Uploads files from a specified directory. - /// The object key is derived from the file names - /// inside the directory. - /// For large uploads, the file will be divided and uploaded in parts using - /// Amazon S3's multipart API. The parts will be reassembled as one object in - /// Amazon S3. - /// - /// - /// - /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. - /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. - /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able - /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, - /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. - /// - /// - /// - /// The source directory, that is, the directory containing the files to upload. - /// - /// - /// The target Amazon S3 bucket, that is, the name of the bucket to upload the files to. - /// - /// - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// - /// The task object representing the asynchronous operation. + /// public async Task UploadDirectoryAsync(string directory, string bucketName, CancellationToken cancellationToken = default(CancellationToken)) { var request = ConstructUploadDirectoryRequest(directory, bucketName); await UploadDirectoryAsync(request, cancellationToken).ConfigureAwait(false); } - /// - /// Uploads files from a specified directory. - /// The object key is derived from the file names - /// inside the directory. - /// For large uploads, the file will be divided and uploaded in parts using - /// Amazon S3's multipart API. The parts will be reassembled as one object in - /// Amazon S3. - /// - /// - /// - /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. - /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. - /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able - /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, - /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. - /// - /// - /// - /// The source directory, that is, the directory containing the files to upload. - /// - /// - /// The target Amazon S3 bucket, that is, the name of the bucket to upload the files to. - /// - /// - /// A pattern used to identify the files from the source directory to upload. - /// - /// - /// A search option that specifies whether to recursively search for files to upload - /// in subdirectories. 
- /// - /// - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// - /// The task object representing the asynchronous operation. + /// public async Task UploadDirectoryAsync(string directory, string bucketName, string searchPattern, SearchOption searchOption, CancellationToken cancellationToken = default(CancellationToken)) { var request = ConstructUploadDirectoryRequest(directory, bucketName, searchPattern, searchOption); await UploadDirectoryAsync(request, cancellationToken).ConfigureAwait(false); } - /// - /// Uploads files from a specified directory. - /// The object key is derived from the file names - /// inside the directory. - /// For large uploads, the file will be divided and uploaded in parts using - /// Amazon S3's multipart API. The parts will be reassembled as one object in - /// Amazon S3. - /// - /// - /// - /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. - /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. - /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able - /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, - /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. - /// - /// - /// - /// The request that contains all the parameters required to upload a directory. - /// - /// - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// - /// The task object representing the asynchronous operation. + /// public async Task UploadDirectoryAsync(TransferUtilityUploadDirectoryRequest request, CancellationToken cancellationToken = default(CancellationToken)) { using(CreateSpan(nameof(UploadDirectoryAsync), null, Amazon.Runtime.Telemetry.Tracing.SpanKind.CLIENT)) @@ -163,45 +81,44 @@ public partial class TransferUtility : ITransferUtility } } + /// + public async Task UploadDirectoryWithResponseAsync(string directory, string bucketName, CancellationToken cancellationToken = default(CancellationToken)) + { + var request = ConstructUploadDirectoryRequest(directory, bucketName); + return await UploadDirectoryWithResponseAsync(request, cancellationToken).ConfigureAwait(false); + } + + /// + public async Task UploadDirectoryWithResponseAsync(string directory, string bucketName, string searchPattern, SearchOption searchOption, CancellationToken cancellationToken = default(CancellationToken)) + { + var request = ConstructUploadDirectoryRequest(directory, bucketName, searchPattern, searchOption); + return await UploadDirectoryWithResponseAsync(request, cancellationToken).ConfigureAwait(false); + } + + /// + public async Task UploadDirectoryWithResponseAsync(TransferUtilityUploadDirectoryRequest request, CancellationToken cancellationToken = default(CancellationToken)) + { + using(CreateSpan(nameof(UploadDirectoryWithResponseAsync), null, Amazon.Runtime.Telemetry.Tracing.SpanKind.CLIENT)) + { + CheckForBlockedArn(request.BucketName, "UploadDirectory"); + validate(request); + UploadDirectoryCommand command = new UploadDirectoryCommand(this, this._config, request); + command.UploadFilesConcurrently = request.UploadFilesConcurrently; + return await command.ExecuteAsync(cancellationToken).ConfigureAwait(false); + } + } + #endregion #region DownloadDirectory - /// - /// Downloads the objects in Amazon S3 
that have a key that starts with the value - /// specified by s3Directory. - /// - /// - /// The name of the bucket containing the Amazon S3 objects to download. - /// - /// - /// The directory in Amazon S3 to download. - /// - /// - /// The local directory to download the objects to. - /// - /// - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// - /// The task object representing the asynchronous operation. + /// public async Task DownloadDirectoryAsync(string bucketName, string s3Directory, string localDirectory, CancellationToken cancellationToken = default(CancellationToken)) { var request = ConstructDownloadDirectoryRequest(bucketName, s3Directory, localDirectory); await DownloadDirectoryAsync(request, cancellationToken).ConfigureAwait(false); } - /// - /// Downloads the objects in Amazon S3 that have a key that starts with the value - /// specified by the S3Directory - /// property of the passed in TransferUtilityDownloadDirectoryRequest object. - /// - /// - /// Contains all the parameters required to download objects from Amazon S3 - /// into a local directory. - /// - /// - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// - /// The task object representing the asynchronous operation. + /// public async Task DownloadDirectoryAsync(TransferUtilityDownloadDirectoryRequest request, CancellationToken cancellationToken = default(CancellationToken)) { using(CreateSpan(nameof(DownloadDirectoryAsync), null, Amazon.Runtime.Telemetry.Tracing.SpanKind.CLIENT)) @@ -215,22 +132,7 @@ public partial class TransferUtility : ITransferUtility #endregion #region Download - /// - /// Downloads the content from Amazon S3 and writes it to the specified file. - /// - /// - /// The file path where the content from Amazon S3 will be written to. - /// - /// - /// The name of the bucket containing the Amazon S3 object to download. - /// - /// - /// The key under which the Amazon S3 object is stored. - /// - /// - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// - /// The task object representing the asynchronous operation. + /// public async Task DownloadAsync(string filePath, string bucketName, string key, CancellationToken cancellationToken = default(CancellationToken)) { var request = ConstructDownloadRequest(filePath, bucketName, key); diff --git a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.sync.cs b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.sync.cs index f1ff62ce820d..05f5c2cac349 100644 --- a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.sync.cs +++ b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.sync.cs @@ -31,29 +31,7 @@ namespace Amazon.S3.Transfer public partial class TransferUtility : ITransferUtility { #region UploadDirectory - /// - /// Uploads files from a specified directory. - /// The object key is derived from the file names - /// inside the directory. - /// For large uploads, the file will be divided and uploaded in parts using - /// Amazon S3's multipart API. The parts will be reassembled as one object in - /// Amazon S3. - /// - /// - /// - /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. - /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. 
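Across these hunks the change follows one pattern: the per-method XML docs duplicated on TransferUtility are deleted and each implementation inherits the interface documentation instead (the bare doc-comment lines added above are assumed to be inheritdoc tags). A minimal sketch of that convention:

// Sketch of the documentation convention applied throughout these hunks
// (the added doc lines are assumed to be <inheritdoc/>).
public partial interface ITransferUtility
{
    /// <summary>
    /// Downloads the content from Amazon S3 and writes it to the specified file.
    /// </summary>
    void Download(string filePath, string bucketName, string key);
}

public partial class TransferUtility : ITransferUtility
{
    /// <inheritdoc/>   // documentation is maintained once, on the interface
    public void Download(string filePath, string bucketName, string key) { /* ... */ }
}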
- /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able - /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, - /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. - /// - /// - /// - /// The source directory, that is, the directory containing the files to upload. - /// - /// - /// The target Amazon S3 bucket, that is, the name of the bucket to upload the files to. - /// + /// public void UploadDirectory(string directory, string bucketName) { try @@ -67,36 +45,7 @@ public void UploadDirectory(string directory, string bucketName) } - /// - /// Uploads files from a specified directory. - /// The object key is derived from the file names - /// inside the directory. - /// For large uploads, the file will be divided and uploaded in parts using - /// Amazon S3's multipart API. The parts will be reassembled as one object in - /// Amazon S3. - /// - /// - /// - /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. - /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. - /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able - /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, - /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. - /// - /// - /// - /// The source directory, that is, the directory containing the files to upload. - /// - /// - /// The target Amazon S3 bucket, that is, the name of the bucket to upload the files to. - /// - /// - /// A pattern used to identify the files from the source directory to upload. - /// - /// - /// A search option that specifies whether to recursively search for files to upload - /// in subdirectories. - /// + /// public void UploadDirectory(string directory, string bucketName, string searchPattern, SearchOption searchOption) { try @@ -109,26 +58,7 @@ public void UploadDirectory(string directory, string bucketName, string searchPa } } - /// - /// Uploads files from a specified directory. - /// The object key is derived from the file names - /// inside the directory. - /// For large uploads, the file will be divided and uploaded in parts using - /// Amazon S3's multipart API. The parts will be reassembled as one object in - /// Amazon S3. - /// - /// - /// - /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. - /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. - /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able - /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, - /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. - /// - /// - /// - /// The request that contains all the parameters required to upload a directory. 
- /// + /// public void UploadDirectory(TransferUtilityUploadDirectoryRequest request) { try @@ -140,33 +70,29 @@ public void UploadDirectory(TransferUtilityUploadDirectoryRequest request) ExceptionDispatchInfo.Capture(e.InnerException).Throw(); } } + + /// + public TransferUtilityUploadDirectoryResponse UploadDirectoryWithResponse(string directory, string bucketName) + { + return UploadDirectoryWithResponseAsync(directory, bucketName).GetAwaiter().GetResult(); + } + + /// + public TransferUtilityUploadDirectoryResponse UploadDirectoryWithResponse(string directory, string bucketName, string searchPattern, SearchOption searchOption) + { + return UploadDirectoryWithResponseAsync(directory, bucketName, searchPattern, searchOption).GetAwaiter().GetResult(); + } + + /// + public TransferUtilityUploadDirectoryResponse UploadDirectoryWithResponse(TransferUtilityUploadDirectoryRequest request) + { + return UploadDirectoryWithResponseAsync(request).GetAwaiter().GetResult(); + } #endregion #region Upload - /// - /// Uploads the specified file. - /// The object key is derived from the file's name. - /// Multiple threads are used to read the file and perform multiple uploads in parallel. - /// For large uploads, the file will be divided and uploaded in parts using - /// Amazon S3's multipart API. The parts will be reassembled as one object in - /// Amazon S3. - /// - /// - /// - /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. - /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. - /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able - /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, - /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. - /// - /// - /// - /// The file path of the file to upload. - /// - /// - /// The target Amazon S3 bucket, that is, the name of the bucket to upload the file to. - /// + /// public void Upload(string filePath, string bucketName) { try @@ -179,31 +105,7 @@ public void Upload(string filePath, string bucketName) } } - /// - /// Uploads the specified file. - /// Multiple threads are used to read the file and perform multiple uploads in parallel. - /// For large uploads, the file will be divided and uploaded in parts using - /// Amazon S3's multipart API. The parts will be reassembled as one object in - /// Amazon S3. - /// - /// - /// - /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. - /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. - /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able - /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, - /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. - /// - /// - /// - /// The file path of the file to upload. - /// - /// - /// The target Amazon S3 bucket, that is, the name of the bucket to upload the file to. - /// - /// - /// The key under which the Amazon S3 object is stored. 
- /// + /// public void Upload(string filePath, string bucketName, string key) { try @@ -217,30 +119,7 @@ public void Upload(string filePath, string bucketName, string key) } - /// - /// Uploads the contents of the specified stream. - /// For large uploads, the file will be divided and uploaded in parts using - /// Amazon S3's multipart API. The parts will be reassembled as one object in - /// Amazon S3. - /// - /// - /// - /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. - /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. - /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able - /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, - /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. - /// - /// - /// - /// The stream to read to obtain the content to upload. - /// - /// - /// The target Amazon S3 bucket, that is, the name of the bucket to upload the stream to. - /// - /// - /// The key under which the Amazon S3 object is stored. - /// + /// public void Upload(Stream stream, string bucketName, string key) { try @@ -253,26 +132,7 @@ public void Upload(Stream stream, string bucketName, string key) } } - /// - /// Uploads the file or stream specified by the request. - /// To track the progress of the upload, - /// add an event listener to the request's UploadProgressEvent. - /// For large uploads, the file will be divided and uploaded in parts using - /// Amazon S3's multipart API. The parts will be reassembled as one object in - /// Amazon S3. - /// - /// - /// - /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. - /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. - /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able - /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, - /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. - /// - /// - /// - /// Contains all the parameters required to upload to Amazon S3. - /// + /// public void Upload(TransferUtilityUploadRequest request) { try @@ -285,24 +145,35 @@ public void Upload(TransferUtilityUploadRequest request) } } + /// + public TransferUtilityUploadResponse UploadWithResponse(string filePath, string bucketName) + { + return UploadWithResponseAsync(filePath, bucketName).GetAwaiter().GetResult(); + } + + /// + public TransferUtilityUploadResponse UploadWithResponse(string filePath, string bucketName, string key) + { + return UploadWithResponseAsync(filePath, bucketName, key).GetAwaiter().GetResult(); + } + + /// + public TransferUtilityUploadResponse UploadWithResponse(Stream stream, string bucketName, string key) + { + return UploadWithResponseAsync(stream, bucketName, key).GetAwaiter().GetResult(); + } + + /// + public TransferUtilityUploadResponse UploadWithResponse(TransferUtilityUploadRequest request) + { + return UploadWithResponseAsync(request).GetAwaiter().GetResult(); + } + #endregion #region OpenStream - /// - /// Returns a stream from which the caller can read the content from the specified - /// Amazon S3 bucket and key. 
- /// The caller of this method is responsible for closing the stream. - /// - /// - /// The name of the bucket. - /// - /// - /// The object key. - /// - /// - /// A stream of the contents from the specified Amazon S3 and key. - /// + /// public Stream OpenStream(string bucketName, string key) { try @@ -316,17 +187,7 @@ public Stream OpenStream(string bucketName, string key) } } - /// - /// Returns a stream to read the contents from Amazon S3 as - /// specified by the TransferUtilityOpenStreamRequest. - /// The caller of this method is responsible for closing the stream. - /// - /// - /// Contains all the parameters required to open a stream to an S3 object. - /// - /// - /// A stream of the contents from Amazon S3. - /// + /// public Stream OpenStream(TransferUtilityOpenStreamRequest request) { try @@ -340,21 +201,22 @@ public Stream OpenStream(TransferUtilityOpenStreamRequest request) } } + /// + public TransferUtilityOpenStreamResponse OpenStreamWithResponse(string bucketName, string key) + { + return OpenStreamWithResponseAsync(bucketName, key).GetAwaiter().GetResult(); + } + + /// + public TransferUtilityOpenStreamResponse OpenStreamWithResponse(TransferUtilityOpenStreamRequest request) + { + return OpenStreamWithResponseAsync(request).GetAwaiter().GetResult(); + } + #endregion #region Download - /// - /// Downloads the content from Amazon S3 and writes it to the specified file. - /// - /// - /// The file path where the content from Amazon S3 will be written to. - /// - /// - /// The name of the bucket containing the Amazon S3 object to download. - /// - /// - /// The key under which the Amazon S3 object is stored. - /// + /// public void Download(string filePath, string bucketName, string key) { try @@ -367,14 +229,7 @@ public void Download(string filePath, string bucketName, string key) } } - /// - /// Downloads the content from Amazon S3 and writes it to the specified file. - /// If the key is not specified in the request parameter, - /// the file name will used as the key name. - /// - /// - /// Contains all the parameters required to download an Amazon S3 object. - /// + /// public void Download(TransferUtilityDownloadRequest request) { try @@ -386,22 +241,22 @@ public void Download(TransferUtilityDownloadRequest request) ExceptionDispatchInfo.Capture(e.InnerException).Throw(); } } + + /// + public TransferUtilityDownloadResponse DownloadWithResponse(string filePath, string bucketName, string key) + { + return DownloadWithResponseAsync(filePath, bucketName, key).GetAwaiter().GetResult(); + } + + /// + public TransferUtilityDownloadResponse DownloadWithResponse(TransferUtilityDownloadRequest request) + { + return DownloadWithResponseAsync(request).GetAwaiter().GetResult(); + } #endregion #region DownloadDirectory - /// - /// Downloads the objects in Amazon S3 that have a key that starts with the value - /// specified by s3Directory. - /// - /// - /// The name of the bucket containing the Amazon S3 objects to download. - /// - /// - /// The directory in Amazon S3 to download. - /// - /// - /// The local directory to download the objects to. - /// + /// public void DownloadDirectory(string bucketName, string s3Directory, string localDirectory) { try @@ -414,15 +269,7 @@ public void DownloadDirectory(string bucketName, string s3Directory, string loca } } - /// - /// Downloads the objects in Amazon S3 that have a key that starts with the value - /// specified by the S3Directory - /// property of the passed in TransferUtilityDownloadDirectoryRequest object. 
- /// - /// - /// Contains all the parameters required to download objects from Amazon S3 - /// into a local directory. - /// + /// public void DownloadDirectory(TransferUtilityDownloadDirectoryRequest request) { try @@ -434,19 +281,23 @@ public void DownloadDirectory(TransferUtilityDownloadDirectoryRequest request) ExceptionDispatchInfo.Capture(e.InnerException).Throw(); } } + + /// + public TransferUtilityDownloadDirectoryResponse DownloadDirectoryWithResponse(string bucketName, string s3Directory, string localDirectory) + { + return DownloadDirectoryWithResponseAsync(bucketName, s3Directory, localDirectory).GetAwaiter().GetResult(); + } + + /// + public TransferUtilityDownloadDirectoryResponse DownloadDirectoryWithResponse(TransferUtilityDownloadDirectoryRequest request) + { + return DownloadDirectoryWithResponseAsync(request).GetAwaiter().GetResult(); + } #endregion #region AbortMultipartUploads - /// - /// Aborts the multipart uploads that were initiated before the specified date. - /// - /// - /// The name of the bucket containing multipart uploads. - /// - /// - /// The date before which the multipart uploads were initiated. - /// + /// public void AbortMultipartUploads(string bucketName, DateTime initiatedDate) { try @@ -459,12 +310,7 @@ public void AbortMultipartUploads(string bucketName, DateTime initiatedDate) } } - /// - /// Aborts the multipart uploads based on the specified request parameters. - /// - /// - /// Contains all the parameters required to abort multipart uploads. - /// + /// public void AbortMultipartUploads(TransferUtilityAbortMultipartUploadRequest request) { try diff --git a/sdk/src/Services/S3/Custom/Util/ContentRangeParser.cs b/sdk/src/Services/S3/Custom/Util/ContentRangeParser.cs new file mode 100644 index 000000000000..ba5aff959e4c --- /dev/null +++ b/sdk/src/Services/S3/Custom/Util/ContentRangeParser.cs @@ -0,0 +1,90 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ +using System; + +namespace Amazon.S3.Util +{ + /// + /// Utility for parsing S3 ContentRange headers. + /// Format: "bytes {start}-{end}/{total}" + /// Example: "bytes 0-5242879/52428800" + /// + internal static class ContentRangeParser + { + /// + /// Parses ContentRange header into its components. 
+ /// + /// ContentRange header value (e.g., "bytes 0-1023/2048") + /// Tuple of (startByte, endByte, totalSize) + /// If ContentRange format is invalid + public static (long startByte, long endByte, long totalSize) Parse(string contentRange) + { + if (string.IsNullOrEmpty(contentRange)) + throw new InvalidOperationException("Content-Range header is missing"); + + // Format: "bytes {start}-{end}/{total-size}" + // Remove "bytes " prefix if present + var parts = contentRange.Replace("bytes ", "").Split('/'); + if (parts.Length != 2) + throw new InvalidOperationException($"Invalid ContentRange format: {contentRange}"); + + // Parse byte range (start-end) + var rangeParts = parts[0].Split('-'); + if (rangeParts.Length != 2 || + !long.TryParse(rangeParts[0], out var startByte) || + !long.TryParse(rangeParts[1], out var endByte)) + throw new InvalidOperationException($"Unable to parse ContentRange byte range: {contentRange}"); + + // Parse total size - S3 always returns exact sizes, never wildcards + if (parts[1] == "*") + throw new InvalidOperationException($"Unexpected wildcard in ContentRange total size: {contentRange}. S3 always returns exact object sizes."); + if (!long.TryParse(parts[1], out var totalSize)) + throw new InvalidOperationException($"Unable to parse ContentRange total size: {contentRange}"); + + return (startByte, endByte, totalSize); + } + + /// + /// Extracts just the start byte position from ContentRange. + /// + /// ContentRange header value + /// Start byte position + /// If ContentRange format is invalid + public static long GetStartByte(string contentRange) + { + var (startByte, _, _) = Parse(contentRange); + return startByte; + } + + /// + /// Extracts just the total size from ContentRange. + /// + /// ContentRange header value + /// Total object size in bytes + /// If ContentRange format is invalid + public static long GetTotalSize(string contentRange) + { + var (_, _, totalSize) = Parse(contentRange); + return totalSize; + } + } +} diff --git a/sdk/src/Services/S3/Properties/AssemblyInfo.cs b/sdk/src/Services/S3/Properties/AssemblyInfo.cs index bf64960f8872..980e732be31d 100644 --- a/sdk/src/Services/S3/Properties/AssemblyInfo.cs +++ b/sdk/src/Services/S3/Properties/AssemblyInfo.cs @@ -21,6 +21,7 @@ [assembly: InternalsVisibleTo("AWSSDK.UnitTests.S3.NetFramework, PublicKey=0024000004800000940000000602000000240000525341310004000001000100db5f59f098d27276c7833875a6263a3cc74ab17ba9a9df0b52aedbe7252745db7274d5271fd79c1f08f668ecfa8eaab5626fa76adc811d3c8fc55859b0d09d3bc0a84eecd0ba891f2b8a2fc55141cdcc37c2053d53491e650a479967c3622762977900eddbf1252ed08a2413f00a28f3a0752a81203f03ccb7f684db373518b4")] [assembly: InternalsVisibleTo("AWSSDK.UnitTests.NetFramework, PublicKey=0024000004800000940000000602000000240000525341310004000001000100db5f59f098d27276c7833875a6263a3cc74ab17ba9a9df0b52aedbe7252745db7274d5271fd79c1f08f668ecfa8eaab5626fa76adc811d3c8fc55859b0d09d3bc0a84eecd0ba891f2b8a2fc55141cdcc37c2053d53491e650a479967c3622762977900eddbf1252ed08a2413f00a28f3a0752a81203f03ccb7f684db373518b4")] +[assembly: InternalsVisibleTo("DynamicProxyGenAssembly2, PublicKey=0024000004800000940000000602000000240000525341310004000001000100c547cac37abd99c8db225ef2f6c8a3602f3b3606cc9891605d02baa56104f4cfc0734aa39b93bf7852f7d9266654753cc297e7d2edfe0bac1cdcf9f717241550e0a7b191195b7667bb4f64bcb8e2121380fd1d9d46ad2d92d2d15605093924cceaf74c4861eff62abf69b9291ed0a340e113be11e6a7d3113e92484cf7045cc7")] [assembly: AssemblyConfiguration("")] [assembly: AssemblyProduct("Amazon Web Services SDK for .NET")] 
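+// Note on the DynamicProxyGenAssembly2 entry added above: that is the well-known name of
+// the dynamic assembly emitted at runtime by Castle DynamicProxy, which mocking frameworks
+// such as Moq build on, so this InternalsVisibleTo lets the unit tests create mocks of
+// internal types.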
[assembly: AssemblyCompany("Amazon.com, Inc")] diff --git a/sdk/test/Services/S3/IntegrationTests/AWSSDK.IntegrationTests.S3.NetFramework.csproj b/sdk/test/Services/S3/IntegrationTests/AWSSDK.IntegrationTests.S3.NetFramework.csproj index 09d7ecd49090..fcf55937873e 100644 --- a/sdk/test/Services/S3/IntegrationTests/AWSSDK.IntegrationTests.S3.NetFramework.csproj +++ b/sdk/test/Services/S3/IntegrationTests/AWSSDK.IntegrationTests.S3.NetFramework.csproj @@ -1,67 +1,67 @@  - net472 - $(DefineConstants);DEBUG;TRACE;BCL;ASYNC_AWAIT;LOCAL_FILE - portable - false - AWSSDK.IntegrationTests.S3.NetFramework - AWSSDK.IntegrationTests.S3.NetFramework + net472 + $(DefineConstants);DEBUG;TRACE;BCL;ASYNC_AWAIT;LOCAL_FILE + portable + false + AWSSDK.IntegrationTests.S3.NetFramework + AWSSDK.IntegrationTests.S3.NetFramework - false - false - false - false - false - false - false - false - true - true - CS1591,CS0612,CS0618 + false + false + false + false + false + false + false + false + true + true + CS1591,CS0612,CS0618 - - - + + + - - - + + + - - - + + + - - - - - - - + + + + + + + - - - - - + + + + + - + - + \ No newline at end of file diff --git a/sdk/test/Services/S3/IntegrationTests/GetObjectTests.cs b/sdk/test/Services/S3/IntegrationTests/GetObjectTests.cs index 6cba8fc6b989..f4c8c103e126 100644 --- a/sdk/test/Services/S3/IntegrationTests/GetObjectTests.cs +++ b/sdk/test/Services/S3/IntegrationTests/GetObjectTests.cs @@ -246,5 +246,50 @@ public void TestContentLanguageResponseHeaderOverride() "Original ContentLanguage should still be stored when no override is specified"); } } + + [TestMethod] + [TestCategory("S3")] + public void TestContentLanguageHeadersCollection() + { + var key = "TestContentLanguageHeadersCollection"; + var expectedLanguage = "de-DE"; + + // Put object with Content-Language header + var putRequest = new PutObjectRequest + { + BucketName = bucketName, + Key = key, + ContentBody = "Test content for Content-Language headers collection" + }; + putRequest.Headers["Content-Language"] = expectedLanguage; + + Client.PutObject(putRequest); + + // Get object and verify both ContentLanguage properties are set + var response = Client.GetObject(new GetObjectRequest + { + BucketName = bucketName, + Key = key + }); + + using (response) + { + // Verify the direct ContentLanguage property + Assert.IsNotNull(response.ContentLanguage, + "ContentLanguage property should not be null"); + Assert.AreEqual(expectedLanguage, response.ContentLanguage, + "ContentLanguage property should match the value set during PutObject"); + + // Verify the Headers.ContentLanguage property + Assert.IsNotNull(response.Headers.ContentLanguage, + "Headers.ContentLanguage property should not be null"); + Assert.AreEqual(expectedLanguage, response.Headers.ContentLanguage, + "Headers.ContentLanguage property should match the value set during PutObject"); + + // Verify both properties have the same value + Assert.AreEqual(response.ContentLanguage, response.Headers.ContentLanguage, + "ContentLanguage and Headers.ContentLanguage should have the same value"); + } + } } } diff --git a/sdk/test/Services/S3/IntegrationTests/TransferUtilityDownloadDirectoryLifecycleTests.cs b/sdk/test/Services/S3/IntegrationTests/TransferUtilityDownloadDirectoryLifecycleTests.cs new file mode 100644 index 000000000000..a3f4fa660324 --- /dev/null +++ b/sdk/test/Services/S3/IntegrationTests/TransferUtilityDownloadDirectoryLifecycleTests.cs @@ -0,0 +1,320 @@ +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using 
System.Threading; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Amazon.S3; +using Amazon.S3.Model; +using Amazon.S3.Transfer; +using Amazon.S3.Util; +using AWSSDK_DotNet.IntegrationTests.Utils; + +namespace AWSSDK_DotNet.IntegrationTests.Tests.S3 +{ + /// + /// Integration tests for TransferUtility download directory lifecycle events. + /// Tests the initiated, completed, and failed events for directory downloads. + /// + [TestClass] + public class TransferUtilityDownloadDirectoryLifecycleTests : TestBase + { + public static readonly long MEG_SIZE = (int)Math.Pow(2, 20); + public static readonly long KILO_SIZE = (int)Math.Pow(2, 10); + public static readonly string BasePath = Path.Combine(Path.GetTempPath(), "transferutility", "downloaddirectorylifecycle"); + + private static string bucketName; + private static string plainTextContentType = "text/plain"; + + [ClassInitialize()] + public static void ClassInitialize(TestContext a) + { + bucketName = S3TestUtils.CreateBucketWithWait(Client); + } + + [ClassCleanup] + public static void ClassCleanup() + { + AmazonS3Util.DeleteS3BucketWithObjects(Client, bucketName); + BaseClean(); + if (Directory.Exists(BasePath)) + { + Directory.Delete(BasePath, true); + } + } + + [TestMethod] + [TestCategory("S3")] + public void DownloadDirectoryInitiatedEventTest() + { + var eventValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.IsNotNull(args.Request.BucketName); + Assert.IsNotNull(args.Request.S3Directory); + Assert.IsNotNull(args.Request.LocalDirectory); + } + }; + DownloadDirectoryWithLifecycleEvents(10 * MEG_SIZE, eventValidator, null, null); + eventValidator.AssertEventFired(); + } + + [TestMethod] + [TestCategory("S3")] + public void DownloadDirectoryCompletedEventTest() + { + var eventValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.IsNotNull(args.Response); + + // Verify progress information is available in completed event + Assert.IsTrue(args.TotalFiles > 0, "TotalFiles should be greater than 0"); + Assert.AreEqual(args.TransferredFiles, args.TotalFiles, "All files should be transferred"); + Assert.IsTrue(args.TotalBytes > 0, "TotalBytes should be greater than 0"); + Assert.AreEqual(args.TransferredBytes, args.TotalBytes, "All bytes should be transferred"); + } + }; + DownloadDirectoryWithLifecycleEvents(12 * MEG_SIZE, null, eventValidator, null); + eventValidator.AssertEventFired(); + } + + [TestMethod] + [TestCategory("S3")] + public void DownloadDirectoryFailedEventTest() + { + var eventValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + } + }; + + // Use an invalid bucket name to force a real exception + // Bucket names with uppercase letters are invalid and will cause an exception + var invalidBucketName = "INVALID-BUCKET-NAME-" + Guid.NewGuid().ToString(); + var testDirectory = "test-directory"; + + var localDirectory = GenerateDirectoryPath(); + + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityDownloadDirectoryRequest + { + BucketName = invalidBucketName, // This will cause an exception due to invalid bucket name + LocalDirectory = localDirectory, + S3Directory = testDirectory + }; + + request.DownloadDirectoryFailedEvent += eventValidator.OnEventFired; + + try + { + transferUtility.DownloadDirectory(request); + Assert.Fail("Expected an exception to be thrown for invalid bucket name"); 
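+                // Note: when DownloadDirectory throws as expected, the Assert.Fail above is never
+                // reached; the DownloadDirectoryFailedEvent is expected to have fired before the
+                // exception propagates to the catch block below, which AssertEventFired() then verifies.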
+ } + catch (Exception ex) + { + // Expected exception - the failed event should have been fired + Console.WriteLine($"Expected exception caught: {ex.GetType().Name} - {ex.Message}"); + } + + eventValidator.AssertEventFired(); + } + + [TestMethod] + [TestCategory("S3")] + public void DownloadDirectoryCompleteLifecycleTest() + { + var initiatedValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.AreEqual(bucketName, args.Request.BucketName); + Assert.IsNotNull(args.Request.S3Directory); + Assert.IsNotNull(args.Request.LocalDirectory); + } + }; + + var completedValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.IsNotNull(args.Response); + Assert.AreEqual(args.TransferredFiles, args.TotalFiles); + Assert.AreEqual(args.TransferredBytes, args.TotalBytes); + Assert.IsTrue(args.TotalFiles > 0, "Should have downloaded at least one file"); + } + }; + + DownloadDirectoryWithLifecycleEvents(15 * MEG_SIZE, initiatedValidator, completedValidator, null); + + initiatedValidator.AssertEventFired(); + completedValidator.AssertEventFired(); + } + + #region Helper Methods + + void DownloadDirectoryWithLifecycleEvents(long fileSize, + TransferLifecycleEventValidator initiatedValidator, + TransferLifecycleEventValidator completedValidator, + TransferLifecycleEventValidator failedValidator) + { + // First create and upload a test directory + var testDirectory = CreateAndUploadTestDirectory(fileSize); + var s3Directory = testDirectory.Name; + + DownloadDirectoryWithLifecycleEventsAndS3Directory(s3Directory, initiatedValidator, completedValidator, failedValidator); + } + + void DownloadDirectoryWithLifecycleEventsAndS3Directory(string s3Directory, + TransferLifecycleEventValidator initiatedValidator, + TransferLifecycleEventValidator completedValidator, + TransferLifecycleEventValidator failedValidator) + { + var localDirectory = GenerateDirectoryPath(); + + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityDownloadDirectoryRequest + { + BucketName = bucketName, + LocalDirectory = localDirectory, + S3Directory = s3Directory + }; + + if (initiatedValidator != null) + { + request.DownloadDirectoryInitiatedEvent += initiatedValidator.OnEventFired; + } + + if (completedValidator != null) + { + request.DownloadDirectoryCompletedEvent += completedValidator.OnEventFired; + } + + if (failedValidator != null) + { + request.DownloadDirectoryFailedEvent += failedValidator.OnEventFired; + } + + transferUtility.DownloadDirectory(request); + + // Validate downloaded directory contents if it was successful + if (Directory.Exists(localDirectory)) + { + var downloadedFiles = Directory.GetFiles(localDirectory, "*", SearchOption.AllDirectories); + Console.WriteLine($"Downloaded {downloadedFiles.Length} files to {localDirectory}"); + } + } + + DirectoryInfo CreateAndUploadTestDirectory(long fileSize, int numberOfTestFiles = 3) + { + var directory = CreateTestDirectory(fileSize, numberOfTestFiles); + var keyPrefix = directory.Name; + var directoryPath = directory.FullName; + + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityUploadDirectoryRequest + { + BucketName = bucketName, + Directory = directoryPath, + KeyPrefix = keyPrefix, + ContentType = plainTextContentType, + SearchPattern = "*", + SearchOption = SearchOption.AllDirectories, + }; + + transferUtility.UploadDirectory(request); + + // Validate the upload was 
successful + ValidateDirectoryContentsInS3(Client, bucketName, keyPrefix, directory); + + return directory; + } + + public static DirectoryInfo CreateTestDirectory(long fileSize = 0, int numberOfTestFiles = 3) + { + if (fileSize == 0) + fileSize = 1 * MEG_SIZE; + + var directoryPath = GenerateDirectoryPath(); + for (int i = 0; i < numberOfTestFiles; i++) + { + var filePath = Path.Combine(Path.Combine(directoryPath, i.ToString()), "file.txt"); + UtilityMethods.GenerateFile(filePath, fileSize); + } + + return new DirectoryInfo(directoryPath); + } + + public static string GenerateDirectoryPath(string baseName = "DownloadDirectoryLifecycleTest") + { + var directoryName = UtilityMethods.GenerateName(baseName); + var directoryPath = Path.Combine(BasePath, directoryName); + return directoryPath; + } + + public static void ValidateDirectoryContentsInS3(IAmazonS3 s3client, string bucketName, string keyPrefix, DirectoryInfo sourceDirectory) + { + var directoryPath = sourceDirectory.FullName; + var files = sourceDirectory.GetFiles("*", SearchOption.AllDirectories); + foreach (var file in files) + { + var filePath = file.FullName; + var relativePath = filePath.Substring(directoryPath.Length + 1); + var key = (!string.IsNullOrEmpty(keyPrefix) ? keyPrefix + "/" : string.Empty) + relativePath.Replace("\\", "/"); + + // Verify the object exists in S3 + var metadata = s3client.GetObjectMetadata(new GetObjectMetadataRequest + { + BucketName = bucketName, + Key = key + }); + Assert.IsNotNull(metadata, $"Object {key} should exist in S3"); + } + } + + #endregion + + #region Shared Helper Classes + + class TransferLifecycleEventValidator + { + public Action Validate { get; set; } + public bool EventFired { get; private set; } + public Exception EventException { get; private set; } + + public void OnEventFired(object sender, T eventArgs) + { + try + { + Console.WriteLine("Lifecycle Event Fired: {0}", typeof(T).Name); + Validate?.Invoke(eventArgs); + EventFired = true; // Only set if validation passes + } + catch (Exception ex) + { + EventException = ex; + EventFired = false; // Ensure we don't mark as fired on failure + Console.WriteLine("Exception caught in lifecycle event: {0}", ex.Message); + // Don't re-throw, let AssertEventFired() handle it + } + } + + public void AssertEventFired() + { + if (EventException != null) + throw EventException; + Assert.IsTrue(EventFired, $"{typeof(T).Name} event was not fired"); + } + } + + #endregion + } +} diff --git a/sdk/test/Services/S3/IntegrationTests/TransferUtilityDownloadDirectoryWithResponseTests.cs b/sdk/test/Services/S3/IntegrationTests/TransferUtilityDownloadDirectoryWithResponseTests.cs new file mode 100644 index 000000000000..eaf593f72dbf --- /dev/null +++ b/sdk/test/Services/S3/IntegrationTests/TransferUtilityDownloadDirectoryWithResponseTests.cs @@ -0,0 +1,632 @@ +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Threading.Tasks; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Amazon.S3; +using Amazon.S3.Model; +using Amazon.S3.Transfer; +using Amazon.S3.Util; +using AWSSDK_DotNet.IntegrationTests.Utils; + +namespace AWSSDK_DotNet.IntegrationTests.Tests.S3 +{ + /// + /// Integration tests for TransferUtility.DownloadDirectoryWithResponseAsync functionality. + /// These tests verify end-to-end functionality with actual S3 operations and directory I/O. 
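+    /// Each test stages data by uploading a local directory with
+    /// TransferUtility.UploadDirectoryAsync, then exercises
+    /// DownloadDirectoryWithResponseAsync against the uploaded prefix.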
+ /// + /// These integration tests focus on: + /// - Basic directory downloads with response object + /// - Progress tracking with response + /// - Multipart downloads in directory context + /// - Concurrent vs sequential downloads + /// - Nested directory structures + /// - Response validation + /// + [TestClass] + public class TransferUtilityDownloadDirectoryWithResponseTests : TestBase + { + private static readonly long MB = 1024 * 1024; + private static readonly long KB = 1024; + private static string bucketName; + private static string tempDirectory; + + [ClassInitialize()] + public static void ClassInitialize(TestContext testContext) + { + bucketName = S3TestUtils.CreateBucketWithWait(Client); + tempDirectory = Path.Combine(Path.GetTempPath(), "S3DownloadDirectoryTests-" + Guid.NewGuid().ToString()); + Directory.CreateDirectory(tempDirectory); + } + + [ClassCleanup] + public static void ClassCleanup() + { + AmazonS3Util.DeleteS3BucketWithObjects(Client, bucketName); + + // Clean up temp directory + if (Directory.Exists(tempDirectory)) + { + try + { + Directory.Delete(tempDirectory, recursive: true); + } + catch + { + // Best effort cleanup + } + } + + BaseClean(); + } + + [TestCleanup] + public void TestCleanup() + { + // Clean up any test directories after each test + if (Directory.Exists(tempDirectory)) + { + foreach (var subDir in Directory.GetDirectories(tempDirectory)) + { + try + { + Directory.Delete(subDir, recursive: true); + } + catch + { + // Best effort cleanup + } + } + } + } + + #region Basic Download Tests + + [TestMethod] + [TestCategory("S3")] + [TestCategory("DownloadDirectory")] + public async Task DownloadDirectoryWithResponse_BasicDownload_ReturnsCorrectResponse() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("basic-download"); + var downloadPath = Path.Combine(tempDirectory, keyPrefix + "-download"); + var fileCount = 5; + + // Upload test directory + await UploadTestDirectory(keyPrefix, 2 * MB, fileCount); + + // Act + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityDownloadDirectoryRequest + { + BucketName = bucketName, + S3Directory = keyPrefix, + LocalDirectory = downloadPath + }; + + var response = await transferUtility.DownloadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsNotNull(response, "Response should not be null"); + Assert.AreEqual(fileCount, response.ObjectsDownloaded, "ObjectsDownloaded should match file count"); + + // Verify all files were downloaded + var downloadedFiles = Directory.GetFiles(downloadPath, "*", SearchOption.AllDirectories); + Assert.AreEqual(fileCount, downloadedFiles.Length, "Downloaded file count should match"); + + // Verify no temp files remain + VerifyNoTempFilesExist(downloadPath); + } + + [TestMethod] + [TestCategory("S3")] + [TestCategory("DownloadDirectory")] + public async Task DownloadDirectoryWithResponse_EmptyDirectory_ReturnsZeroObjectsDownloaded() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("empty-directory"); + var downloadPath = Path.Combine(tempDirectory, keyPrefix + "-download"); + + // Act - Download non-existent directory + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityDownloadDirectoryRequest + { + BucketName = bucketName, + S3Directory = keyPrefix, + LocalDirectory = downloadPath + }; + + var response = await transferUtility.DownloadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsNotNull(response, "Response should not be null"); + Assert.AreEqual(0, 
response.ObjectsDownloaded, "ObjectsDownloaded should be 0 for empty directory"); + + // Directory may or may not exist, but should have no files + if (Directory.Exists(downloadPath)) + { + var downloadedFiles = Directory.GetFiles(downloadPath, "*", SearchOption.AllDirectories); + Assert.AreEqual(0, downloadedFiles.Length, "No files should be downloaded"); + } + } + + #endregion + + #region Progress Tracking Tests + + [TestMethod] + [TestCategory("S3")] + [TestCategory("DownloadDirectory")] + public async Task DownloadDirectoryWithResponse_WithProgressTracking_FiresProgressEvents() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("progress-tracking"); + var downloadPath = Path.Combine(tempDirectory, keyPrefix + "-download"); + var fileCount = 3; + + await UploadTestDirectory(keyPrefix, 5 * MB, fileCount); + + var progressEvents = new List(); + var progressLock = new object(); + + // Act + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityDownloadDirectoryRequest + { + BucketName = bucketName, + S3Directory = keyPrefix, + LocalDirectory = downloadPath + }; + + request.DownloadedDirectoryProgressEvent += (sender, args) => + { + lock (progressLock) + { + progressEvents.Add(args); + } + }; + + var response = await transferUtility.DownloadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsNotNull(response, "Response should not be null"); + Assert.AreEqual(fileCount, response.ObjectsDownloaded); + Assert.IsTrue(progressEvents.Count > 0, "Progress events should have fired"); + + // Verify final progress event + var finalEvent = progressEvents.Last(); + Assert.AreEqual(fileCount, finalEvent.NumberOfFilesDownloaded); + Assert.AreEqual(fileCount, finalEvent.TotalNumberOfFiles); + Assert.AreEqual(finalEvent.TransferredBytes, finalEvent.TotalBytes); + } + + [TestMethod] + [TestCategory("S3")] + [TestCategory("DownloadDirectory")] + public async Task DownloadDirectoryWithResponse_SequentialMode_IncludesCurrentFileDetails() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("sequential-progress"); + var downloadPath = Path.Combine(tempDirectory, keyPrefix + "-download"); + + await UploadTestDirectory(keyPrefix, 3 * MB, 3); + + var progressEvents = new List(); + + // Act + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityDownloadDirectoryRequest + { + BucketName = bucketName, + S3Directory = keyPrefix, + LocalDirectory = downloadPath, + DownloadFilesConcurrently = false // Sequential mode + }; + + request.DownloadedDirectoryProgressEvent += (sender, args) => + { + progressEvents.Add(args); + }; + + var response = await transferUtility.DownloadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual(3, response.ObjectsDownloaded); + + // In sequential mode, should have CurrentFile populated + var eventsWithFile = progressEvents.Where(e => e.CurrentFile != null).ToList(); + Assert.IsTrue(eventsWithFile.Count > 0, "Should have events with CurrentFile populated"); + + foreach (var evt in eventsWithFile) + { + Assert.IsNotNull(evt.CurrentFile); + Assert.IsTrue(evt.TotalNumberOfBytesForCurrentFile > 0); + } + } + + [TestMethod] + [TestCategory("S3")] + [TestCategory("DownloadDirectory")] + public async Task DownloadDirectoryWithResponse_ConcurrentMode_OmitsCurrentFileDetails() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("concurrent-progress"); + var downloadPath = Path.Combine(tempDirectory, keyPrefix + "-download"); + + await 
UploadTestDirectory(keyPrefix, 3 * MB, 4); + + var progressEvents = new List(); + var progressLock = new object(); + + // Act + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityDownloadDirectoryRequest + { + BucketName = bucketName, + S3Directory = keyPrefix, + LocalDirectory = downloadPath, + DownloadFilesConcurrently = true // Concurrent mode + }; + + request.DownloadedDirectoryProgressEvent += (sender, args) => + { + lock (progressLock) + { + progressEvents.Add(args); + } + }; + + var response = await transferUtility.DownloadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual(4, response.ObjectsDownloaded); + Assert.IsTrue(progressEvents.Count > 0); + + // In concurrent mode, CurrentFile should be null + foreach (var evt in progressEvents) + { + Assert.IsNull(evt.CurrentFile, "CurrentFile should be null in concurrent mode"); + Assert.AreEqual(0, evt.TransferredBytesForCurrentFile); + Assert.AreEqual(0, evt.TotalNumberOfBytesForCurrentFile); + } + } + + #endregion + + #region Multipart Download Tests + + [TestMethod] + [TestCategory("S3")] + [TestCategory("DownloadDirectory")] + [TestCategory("Multipart")] + public async Task DownloadDirectoryWithResponse_WithMultipartFiles_DownloadsSuccessfully() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("multipart-directory"); + var downloadPath = Path.Combine(tempDirectory, keyPrefix + "-download"); + var fileCount = 3; + + // Upload directory with large files to trigger multipart (>16MB threshold) + await UploadTestDirectory(keyPrefix, 20 * MB, fileCount); + + // Act + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityDownloadDirectoryRequest + { + BucketName = bucketName, + S3Directory = keyPrefix, + LocalDirectory = downloadPath + }; + + var response = await transferUtility.DownloadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual(fileCount, response.ObjectsDownloaded); + + // Verify all files downloaded with correct sizes + var downloadedFiles = Directory.GetFiles(downloadPath, "*", SearchOption.AllDirectories); + Assert.AreEqual(fileCount, downloadedFiles.Length); + + foreach (var file in downloadedFiles) + { + var fileInfo = new FileInfo(file); + Assert.AreEqual(20 * MB, fileInfo.Length, $"File {file} should be 20MB"); + } + + VerifyNoTempFilesExist(downloadPath); + } + + #endregion + + #region Nested Directory Tests + + [TestMethod] + [TestCategory("S3")] + [TestCategory("DownloadDirectory")] + public async Task DownloadDirectoryWithResponse_NestedDirectories_PreservesStructure() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("nested-structure"); + var downloadPath = Path.Combine(tempDirectory, keyPrefix + "-download"); + + // Upload nested directory structure + var nestedFiles = new Dictionary + { + { "level1/file1.txt", 1 * MB }, + { "level1/level2/file2.txt", 2 * MB }, + { "level1/level2/level3/file3.txt", 3 * MB } + }; + + await UploadTestFilesWithStructure(keyPrefix, nestedFiles); + + // Act + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityDownloadDirectoryRequest + { + BucketName = bucketName, + S3Directory = keyPrefix, + LocalDirectory = downloadPath + }; + + var response = await transferUtility.DownloadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual(nestedFiles.Count, response.ObjectsDownloaded); + + // Verify directory structure + foreach (var 
kvp in nestedFiles) + { + var expectedPath = Path.Combine(downloadPath, kvp.Key.Replace('/', Path.DirectorySeparatorChar)); + Assert.IsTrue(File.Exists(expectedPath), $"File should exist: {expectedPath}"); + + var fileInfo = new FileInfo(expectedPath); + Assert.AreEqual(kvp.Value, fileInfo.Length, $"File size should match: {expectedPath}"); + } + } + + #endregion + + #region Concurrent vs Sequential Tests + + [TestMethod] + [TestCategory("S3")] + [TestCategory("DownloadDirectory")] + public async Task DownloadDirectoryWithResponse_ConcurrentMode_DownloadsAllFiles() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("concurrent-download"); + var downloadPath = Path.Combine(tempDirectory, keyPrefix + "-download"); + var fileCount = 10; + + await UploadTestDirectory(keyPrefix, 2 * MB, fileCount); + + // Act + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityDownloadDirectoryRequest + { + BucketName = bucketName, + S3Directory = keyPrefix, + LocalDirectory = downloadPath, + DownloadFilesConcurrently = true + }; + + var response = await transferUtility.DownloadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual(fileCount, response.ObjectsDownloaded); + + var downloadedFiles = Directory.GetFiles(downloadPath, "*", SearchOption.AllDirectories); + Assert.AreEqual(fileCount, downloadedFiles.Length); + } + + [TestMethod] + [TestCategory("S3")] + [TestCategory("DownloadDirectory")] + public async Task DownloadDirectoryWithResponse_SequentialMode_DownloadsAllFiles() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("sequential-download"); + var downloadPath = Path.Combine(tempDirectory, keyPrefix + "-download"); + var fileCount = 5; + + await UploadTestDirectory(keyPrefix, 3 * MB, fileCount); + + // Act + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityDownloadDirectoryRequest + { + BucketName = bucketName, + S3Directory = keyPrefix, + LocalDirectory = downloadPath, + DownloadFilesConcurrently = false + }; + + var response = await transferUtility.DownloadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual(fileCount, response.ObjectsDownloaded); + + var downloadedFiles = Directory.GetFiles(downloadPath, "*", SearchOption.AllDirectories); + Assert.AreEqual(fileCount, downloadedFiles.Length); + } + + #endregion + + #region Mixed File Size Tests + + [TestMethod] + [TestCategory("S3")] + [TestCategory("DownloadDirectory")] + public async Task DownloadDirectoryWithResponse_MixedFileSizes_DownloadsAll() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("mixed-sizes"); + var downloadPath = Path.Combine(tempDirectory, keyPrefix + "-download"); + + var mixedFiles = new Dictionary + { + { "tiny.txt", 100 }, // 100 bytes + { "small.txt", 512 * KB }, // 512 KB + { "medium.txt", 5 * MB }, // 5 MB + { "large.txt", 20 * MB } // 20 MB (multipart) + }; + + await UploadTestFilesWithStructure(keyPrefix, mixedFiles); + + // Act + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityDownloadDirectoryRequest + { + BucketName = bucketName, + S3Directory = keyPrefix, + LocalDirectory = downloadPath + }; + + var response = await transferUtility.DownloadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual(mixedFiles.Count, response.ObjectsDownloaded); + + // Verify each file's size + foreach (var kvp in mixedFiles) + { + var filePath = 
Path.Combine(downloadPath, kvp.Key); + Assert.IsTrue(File.Exists(filePath), $"File should exist: {filePath}"); + + var fileInfo = new FileInfo(filePath); + Assert.AreEqual(kvp.Value, fileInfo.Length, $"File size should match: {filePath}"); + } + } + + #endregion + + #region Helper Methods + + /// + /// Uploads a test directory with specified number of files using TransferUtility.UploadDirectory + /// + private static async Task UploadTestDirectory(string keyPrefix, long fileSize, int fileCount) + { + // Create local temp directory structure + var tempUploadDir = Path.Combine(Path.GetTempPath(), "upload-" + Guid.NewGuid().ToString()); + Directory.CreateDirectory(tempUploadDir); + + try + { + // Generate files locally + for (int i = 0; i < fileCount; i++) + { + var fileName = $"file{i}.dat"; + var localPath = Path.Combine(tempUploadDir, fileName); + UtilityMethods.GenerateFile(localPath, fileSize); + } + + // Upload entire directory using TransferUtility + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityUploadDirectoryRequest + { + BucketName = bucketName, + Directory = tempUploadDir, + KeyPrefix = keyPrefix, + SearchPattern = "*.dat", // Only match test data files, not system files + SearchOption = SearchOption.AllDirectories + }; + + await transferUtility.UploadDirectoryAsync(request); + } + finally + { + // Cleanup temp directory + if (Directory.Exists(tempUploadDir)) + { + try + { + Directory.Delete(tempUploadDir, recursive: true); + } + catch + { + // Best effort cleanup + } + } + } + } + + /// + /// Uploads test files with specific structure using TransferUtility.UploadDirectory + /// + private static async Task UploadTestFilesWithStructure(string keyPrefix, Dictionary files) + { + // Create local temp directory structure + var tempUploadDir = Path.Combine(Path.GetTempPath(), "upload-struct-" + Guid.NewGuid().ToString()); + + try + { + // Generate files with directory structure locally + foreach (var kvp in files) + { + var localPath = Path.Combine(tempUploadDir, kvp.Key.Replace('/', Path.DirectorySeparatorChar)); + var directory = Path.GetDirectoryName(localPath); + + if (!string.IsNullOrEmpty(directory)) + { + Directory.CreateDirectory(directory); + } + + UtilityMethods.GenerateFile(localPath, kvp.Value); + } + + // Upload entire directory using TransferUtility + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityUploadDirectoryRequest + { + BucketName = bucketName, + Directory = tempUploadDir, + KeyPrefix = keyPrefix, + SearchPattern = "*", + SearchOption = SearchOption.AllDirectories + }; + + await transferUtility.UploadDirectoryAsync(request); + } + finally + { + // Cleanup temp directory + if (Directory.Exists(tempUploadDir)) + { + try + { + Directory.Delete(tempUploadDir, recursive: true); + } + catch + { + // Best effort cleanup + } + } + } + } + + /// + /// Verifies that no temporary files remain after download completion. + /// Temp files use the pattern: {originalPath}.s3tmp.{8-char-id} + /// + private static void VerifyNoTempFilesExist(string directoryPath) + { + if (Directory.Exists(directoryPath)) + { + var tempFiles = Directory.GetFiles(directoryPath, "*.s3tmp.*", SearchOption.AllDirectories); + Assert.AreEqual(0, tempFiles.Length, + $"No temporary files should remain. 
Found: {string.Join(", ", tempFiles)}"); + } + } + + #endregion + } +} diff --git a/sdk/test/Services/S3/IntegrationTests/TransferUtilityDownloadWithResponseTests.cs b/sdk/test/Services/S3/IntegrationTests/TransferUtilityDownloadWithResponseTests.cs new file mode 100644 index 000000000000..db81e731129b --- /dev/null +++ b/sdk/test/Services/S3/IntegrationTests/TransferUtilityDownloadWithResponseTests.cs @@ -0,0 +1,692 @@ +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Threading.Tasks; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Amazon.S3; +using Amazon.S3.Model; +using Amazon.S3.Transfer; +using Amazon.S3.Util; +using Amazon.Util; +using AWSSDK_DotNet.IntegrationTests.Utils; + +namespace AWSSDK_DotNet.IntegrationTests.Tests.S3 +{ + /// + /// Integration tests for TransferUtility.DownloadWithResponseAsync functionality. + /// These tests verify end-to-end functionality with actual S3 operations and file I/O. + /// + /// Most test scenarios (buffer sizes, part boundaries, error handling) are covered + /// in unit tests with mocked dependencies for faster execution. + /// + /// These integration tests focus on: + /// - Basic single-part downloads to files + /// - Basic multipart downloads to files + /// - Real S3 metadata preservation + /// - File handling (temp files, atomic writes, cleanup) + /// - Checksum validation + /// + [TestClass] + public class TransferUtilityDownloadWithResponseTests : TestBase + { + private static readonly long MB = 1024 * 1024; + private static string bucketName; + private static string tempDirectory; + + [ClassInitialize()] + public static void ClassInitialize(TestContext testContext) + { + bucketName = S3TestUtils.CreateBucketWithWait(Client); + tempDirectory = Path.Combine(Path.GetTempPath(), "S3DownloadTests-" + Guid.NewGuid().ToString()); + Directory.CreateDirectory(tempDirectory); + } + + [ClassCleanup] + public static void ClassCleanup() + { + AmazonS3Util.DeleteS3BucketWithObjects(Client, bucketName); + + // Clean up temp directory + if (Directory.Exists(tempDirectory)) + { + try + { + Directory.Delete(tempDirectory, recursive: true); + } + catch + { + // Best effort cleanup + } + } + + BaseClean(); + } + + [TestCleanup] + public void TestCleanup() + { + // Clean up any test files after each test + if (Directory.Exists(tempDirectory)) + { + foreach (var file in Directory.GetFiles(tempDirectory)) + { + try + { + File.Delete(file); + } + catch + { + // Best effort cleanup + } + } + } + } + + #region Single-Part Tests + + [TestMethod] + [TestCategory("S3")] + [TestCategory("Download")] + public async Task DownloadWithResponse_SinglePart_SmallObject() + { + // Arrange + var objectSize = 2 * MB; + var (key, expectedChecksum) = await CreateTestObjectWithChecksum(objectSize); + var downloadPath = Path.Combine(tempDirectory, key); + + // Act + var transferUtility = new TransferUtility(Client); + var response = await transferUtility.DownloadWithResponseAsync(downloadPath, bucketName, key); + + // Assert + Assert.IsNotNull(response, "Response should not be null"); + ValidateResponse(response, objectSize); + + // Verify file was written + Assert.IsTrue(File.Exists(downloadPath), "Downloaded file should exist"); + var downloadedChecksum = CalculateFileChecksum(downloadPath); + Assert.AreEqual(expectedChecksum, downloadedChecksum, "Downloaded data checksum should match"); + + var fileInfo = new FileInfo(downloadPath); + Assert.AreEqual(objectSize, fileInfo.Length, "Downloaded file size should 
match"); + + // Verify no temp files remain + VerifyNoTempFilesExist(downloadPath); + } + + [TestMethod] + [TestCategory("S3")] + [TestCategory("Download")] + public async Task DownloadWithResponse_SinglePart_EmptyObject() + { + // Arrange + var key = UtilityMethods.GenerateName("empty-object"); + await Client.PutObjectAsync(new PutObjectRequest + { + BucketName = bucketName, + Key = key, + ContentBody = "" + }); + var downloadPath = Path.Combine(tempDirectory, key); + + // Act + var transferUtility = new TransferUtility(Client); + var response = await transferUtility.DownloadWithResponseAsync(downloadPath, bucketName, key); + + // Assert + Assert.IsNotNull(response, "Response should not be null"); + Assert.AreEqual(0, response.Headers.ContentLength, "Content length should be 0"); + + // Verify ContentRange is null for 0-byte objects (matches S3 behavior) + Assert.IsNull(response.ContentRange, + "ContentRange should be null for 0-byte objects (matching S3 behavior)"); + + // Verify empty file was written + Assert.IsTrue(File.Exists(downloadPath), "Downloaded file should exist"); + var fileInfo = new FileInfo(downloadPath); + Assert.AreEqual(0, fileInfo.Length, "Downloaded file should be empty"); + + // Verify no temp files remain + VerifyNoTempFilesExist(downloadPath); + } + + #endregion + + #region Multipart Tests + + [TestMethod] + [TestCategory("S3")] + [TestCategory("Download")] + [TestCategory("Multipart")] + public async Task DownloadWithResponse_Multipart_BasicDownload() + { + // Arrange - Simple multipart download to verify end-to-end S3 integration + var objectSize = 20 * MB; + var partSize = 8 * MB; + var key = UtilityMethods.GenerateName("multipart-download-test"); + var uploadPath = Path.Combine(Path.GetTempPath(), key + "-upload"); + var downloadPath = Path.Combine(tempDirectory, key); + + UtilityMethods.GenerateFile(uploadPath, objectSize); + + // Calculate checksum before upload + var expectedChecksum = CalculateFileChecksum(uploadPath); + + // Upload using TransferUtility to ensure multipart upload + var uploadRequest = new TransferUtilityUploadRequest + { + BucketName = bucketName, + Key = key, + FilePath = uploadPath, + PartSize = partSize + }; + + var transferUtility = new TransferUtility(Client); + await transferUtility.UploadAsync(uploadRequest); + + // Verify object is multipart by checking PartsCount + var metadata = await Client.GetObjectMetadataAsync(new GetObjectMetadataRequest + { + BucketName = bucketName, + Key = key, + PartNumber = 1 + }); + Assert.IsTrue(metadata.PartsCount > 1, "Object should be multipart to test multipart download"); + + var downloadRequest = new TransferUtilityDownloadRequest + { + BucketName = bucketName, + Key = key, + FilePath = downloadPath, + PartSize = partSize + }; + + // Act + var response = await transferUtility.DownloadWithResponseAsync(downloadRequest); + + // Assert + Assert.IsNotNull(response, "Response should not be null"); + ValidateResponse(response, objectSize); + + // Verify file was written correctly + Assert.IsTrue(File.Exists(downloadPath), "Downloaded file should exist"); + var downloadedChecksum = CalculateFileChecksum(downloadPath); + Assert.AreEqual(expectedChecksum, downloadedChecksum, "Downloaded data checksum should match"); + + var fileInfo = new FileInfo(downloadPath); + Assert.AreEqual(objectSize, fileInfo.Length, "Downloaded file size should match"); + + // Verify no temp files remain + VerifyNoTempFilesExist(downloadPath); + + // Cleanup upload file + File.Delete(uploadPath); + } + + [TestMethod] + 
[TestCategory("S3")] + [TestCategory("Download")] + public async Task DownloadWithResponse_RangeStrategy_SmallSinglePartObject() + { + // Arrange - Small object that fits in single part, but using RANGE strategy + // This tests that ContentRange header is present even for single-part downloads + // when using RANGE strategy (S3 includes ContentRange when Range header is sent) + var objectSize = 2 * MB; // Less than default 8MB part size + var key = UtilityMethods.GenerateName("range-single-part-test"); + var uploadPath = Path.Combine(Path.GetTempPath(), key + "-upload"); + var downloadPath = Path.Combine(tempDirectory, key); + + UtilityMethods.GenerateFile(uploadPath, objectSize); + + // Calculate checksum before upload + var expectedChecksum = CalculateFileChecksum(uploadPath); + + await Client.PutObjectAsync(new PutObjectRequest + { + BucketName = bucketName, + Key = key, + FilePath = uploadPath + }); + + // Act - Download with RANGE strategy even though only 1 part needed + var downloadRequest = new TransferUtilityDownloadRequest + { + BucketName = bucketName, + Key = key, + FilePath = downloadPath, + MultipartDownloadType = MultipartDownloadType.RANGE, + PartSize = 8 * MB // Larger than file, so only 1 part needed + }; + + var transferUtility = new TransferUtility(Client); + var response = await transferUtility.DownloadWithResponseAsync(downloadRequest); + + // Assert + Assert.IsNotNull(response, "Response should not be null"); + ValidateResponse(response, objectSize); + + // Verify ContentRange is present (because RANGE strategy uses Range headers) + Assert.IsNotNull(response.ContentRange, + "ContentRange should be present when using RANGE strategy, even for single-part downloads"); + Assert.IsTrue(response.ContentRange.StartsWith("bytes "), + "ContentRange should have correct format"); + + // Verify file was written correctly + Assert.IsTrue(File.Exists(downloadPath), "Downloaded file should exist"); + var downloadedChecksum = CalculateFileChecksum(downloadPath); + Assert.AreEqual(expectedChecksum, downloadedChecksum, + "Downloaded data checksum should match (RANGE strategy, single part)"); + + var fileInfo = new FileInfo(downloadPath); + Assert.AreEqual(objectSize, fileInfo.Length, + "Downloaded file size should match (RANGE strategy, single part)"); + + // Verify no temp files remain + VerifyNoTempFilesExist(downloadPath); + + // Cleanup upload file + File.Delete(uploadPath); + } + + [TestMethod] + [TestCategory("S3")] + [TestCategory("Download")] + [TestCategory("Multipart")] + public async Task DownloadWithResponse_Multipart_RangeDownload() + { + // Arrange - Test RANGE-based multipart download with custom part size + var objectSize = 20 * MB; + var uploadPartSize = 8 * MB; // Upload with 8MB parts + var downloadPartSize = 6 * MB; // Download with different 6MB parts + var key = UtilityMethods.GenerateName("multipart-range-test"); + var uploadPath = Path.Combine(Path.GetTempPath(), key + "-upload"); + var downloadPath = Path.Combine(tempDirectory, key); + + UtilityMethods.GenerateFile(uploadPath, objectSize); + + // Calculate checksum before upload + var expectedChecksum = CalculateFileChecksum(uploadPath); + + // Upload using TransferUtility to ensure multipart upload + var uploadRequest = new TransferUtilityUploadRequest + { + BucketName = bucketName, + Key = key, + FilePath = uploadPath, + PartSize = uploadPartSize + }; + + var transferUtility = new TransferUtility(Client); + await transferUtility.UploadAsync(uploadRequest); + + // Verify object is multipart + var metadata = 
await Client.GetObjectMetadataAsync(new GetObjectMetadataRequest + { + BucketName = bucketName, + Key = key, + PartNumber = 1 + }); + Assert.IsTrue(metadata.PartsCount > 1, "Object should be multipart to test multipart download"); + + // Act - Download using RANGE strategy with different part size + var downloadRequest = new TransferUtilityDownloadRequest + { + BucketName = bucketName, + Key = key, + FilePath = downloadPath, + MultipartDownloadType = MultipartDownloadType.RANGE, + PartSize = downloadPartSize + }; + + var response = await transferUtility.DownloadWithResponseAsync(downloadRequest); + + // Assert + Assert.IsNotNull(response, "Response should not be null"); + ValidateResponse(response, objectSize); + + // Verify file was written correctly + Assert.IsTrue(File.Exists(downloadPath), "Downloaded file should exist"); + var downloadedChecksum = CalculateFileChecksum(downloadPath); + Assert.AreEqual(expectedChecksum, downloadedChecksum, + "Downloaded data checksum should match (RANGE strategy)"); + + var fileInfo = new FileInfo(downloadPath); + Assert.AreEqual(objectSize, fileInfo.Length, + "Downloaded file size should match (RANGE strategy)"); + + // Verify no temp files remain + VerifyNoTempFilesExist(downloadPath); + + // Cleanup upload file + File.Delete(uploadPath); + } + + #endregion + + #region Checksum Tests + + [TestMethod] + [TestCategory("S3")] + [TestCategory("Download")] + [TestCategory("Checksum")] + public async Task DownloadWithResponse_MultipartObjectWithChecksums_NullsCompositeChecksums() + { + // Arrange - Upload a multipart object with checksums + var objectSize = 20 * MB; + var key = UtilityMethods.GenerateName("composite-checksum-test"); + var uploadPath = Path.Combine(Path.GetTempPath(), key + "-upload"); + var downloadPath = Path.Combine(tempDirectory, key); + + UtilityMethods.GenerateFile(uploadPath, objectSize); + + // Upload with checksum algorithm to create composite checksum + var uploadRequest = new TransferUtilityUploadRequest + { + BucketName = bucketName, + Key = key, + FilePath = uploadPath, + ChecksumAlgorithm = ChecksumAlgorithm.CRC32, + PartSize = 8 * MB + }; + + var uploadUtility = new TransferUtility(Client); + await uploadUtility.UploadAsync(uploadRequest); + + // Verify object is multipart by checking PartsCount + var metadata = await Client.GetObjectMetadataAsync(new GetObjectMetadataRequest + { + BucketName = bucketName, + Key = key, + PartNumber = 1 + }); + Assert.IsTrue(metadata.PartsCount > 1, "Object should be multipart to test composite checksums"); + + // Act - Download with ChecksumMode enabled + var downloadRequest = new TransferUtilityDownloadRequest + { + BucketName = bucketName, + Key = key, + FilePath = downloadPath, + ChecksumMode = ChecksumMode.ENABLED + }; + + var response = await uploadUtility.DownloadWithResponseAsync(downloadRequest); + + // Assert - Verify ChecksumType is COMPOSITE + Assert.AreEqual(ChecksumType.COMPOSITE, response.ChecksumType, + "ChecksumType should be COMPOSITE for multipart objects"); + + // Per spec: "If ChecksumType is COMPOSITE, set all checksum value members to null + // as the checksum value returned from a part GET request is not the composite + // checksum for the entire object" + Assert.IsNull(response.ChecksumCRC32, "ChecksumCRC32 should be null for composite checksums"); + Assert.IsNull(response.ChecksumCRC32C, "ChecksumCRC32C should be null for composite checksums"); + Assert.IsNull(response.ChecksumCRC64NVME, "ChecksumCRC64NVME should be null for composite checksums"); + 
Assert.IsNull(response.ChecksumSHA1, "ChecksumSHA1 should be null for composite checksums"); + Assert.IsNull(response.ChecksumSHA256, "ChecksumSHA256 should be null for composite checksums"); + + // Verify other response properties are still populated correctly + Assert.IsNotNull(response.ETag, "ETag should still be populated"); + Assert.IsTrue(response.Headers.ContentLength > 0, "ContentLength should be populated"); + + // Verify file was written correctly + Assert.IsTrue(File.Exists(downloadPath), "Downloaded file should exist"); + var fileInfo = new FileInfo(downloadPath); + Assert.AreEqual(objectSize, fileInfo.Length, "Downloaded file size should match"); + + // Verify no temp files remain + VerifyNoTempFilesExist(downloadPath); + + // Cleanup upload file + File.Delete(uploadPath); + } + + #endregion + + #region Metadata Validation Tests + + [TestMethod] + [TestCategory("S3")] + [TestCategory("Download")] + [TestCategory("Metadata")] + public async Task DownloadWithResponse_PreservesMetadata() + { + // Arrange + var objectSize = 10 * MB; + var key = UtilityMethods.GenerateName("metadata-test"); + var uploadPath = Path.Combine(Path.GetTempPath(), key + "-upload"); + var downloadPath = Path.Combine(tempDirectory, key); + + UtilityMethods.GenerateFile(uploadPath, objectSize); + + var putRequest = new PutObjectRequest + { + BucketName = bucketName, + Key = key, + FilePath = uploadPath, + ContentType = "application/octet-stream" + }; + putRequest.Metadata.Add("test-key", "test-value"); + putRequest.Metadata.Add("custom-header", "custom-value"); + + await Client.PutObjectAsync(putRequest); + + // Act + var transferUtility = new TransferUtility(Client); + var response = await transferUtility.DownloadWithResponseAsync(downloadPath, bucketName, key); + + // Assert + Assert.IsNotNull(response, "Response should not be null"); + Assert.AreEqual("application/octet-stream", response.Headers.ContentType); + + // S3 automatically prefixes user-defined metadata with "x-amz-meta-" + Assert.IsTrue(response.Metadata.Keys.Contains("x-amz-meta-test-key"), + "Metadata should contain 'x-amz-meta-test-key'"); + Assert.AreEqual("test-value", response.Metadata["x-amz-meta-test-key"]); + + Assert.IsTrue(response.Metadata.Keys.Contains("x-amz-meta-custom-header"), + "Metadata should contain 'x-amz-meta-custom-header'"); + Assert.AreEqual("custom-value", response.Metadata["x-amz-meta-custom-header"]); + + // Verify file was written + Assert.IsTrue(File.Exists(downloadPath), "Downloaded file should exist"); + + // Verify no temp files remain + VerifyNoTempFilesExist(downloadPath); + + // Cleanup upload file + File.Delete(uploadPath); + } + + [TestMethod] + [TestCategory("S3")] + [TestCategory("Download")] + [TestCategory("Metadata")] + public async Task DownloadWithResponse_PreservesETag() + { + // Arrange + var objectSize = 15 * MB; + var key = UtilityMethods.GenerateName("etag-test"); + var uploadPath = Path.Combine(Path.GetTempPath(), key + "-upload"); + var downloadPath = Path.Combine(tempDirectory, key); + + UtilityMethods.GenerateFile(uploadPath, objectSize); + + await Client.PutObjectAsync(new PutObjectRequest + { + BucketName = bucketName, + Key = key, + FilePath = uploadPath + }); + + var metadata = await Client.GetObjectMetadataAsync(new GetObjectMetadataRequest + { + BucketName = bucketName, + Key = key + }); + var expectedETag = metadata.ETag; + + // Act + var transferUtility = new TransferUtility(Client); + var response = await transferUtility.DownloadWithResponseAsync(downloadPath, bucketName, key); + 
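+            // Note: the object above was uploaded with a single PutObject call, so its ETag is
+            // typically the quoted MD5 of the content; a multipart upload would instead produce
+            // an ETag with a "-{partCount}" suffix that is not an MD5 of the whole object.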
+ // Assert + Assert.IsNotNull(response.ETag, "ETag should not be null"); + Assert.AreEqual(expectedETag, response.ETag, "ETag should match"); + + // Verify file was written + Assert.IsTrue(File.Exists(downloadPath), "Downloaded file should exist"); + + // Verify no temp files remain + VerifyNoTempFilesExist(downloadPath); + + // Cleanup upload file + File.Delete(uploadPath); + } + + #endregion + + #region File Handling Tests + + [TestMethod] + [TestCategory("S3")] + [TestCategory("Download")] + [TestCategory("FileHandling")] + public async Task DownloadWithResponse_CreatesDirectoryIfNeeded() + { + // Arrange + var objectSize = 5 * MB; + var (key, expectedChecksum) = await CreateTestObjectWithChecksum(objectSize); + + // Create a nested directory path that doesn't exist + var nestedDir = Path.Combine(tempDirectory, "level1", "level2", "level3"); + var downloadPath = Path.Combine(nestedDir, key); + + Assert.IsFalse(Directory.Exists(nestedDir), "Nested directory should not exist initially"); + + // Act + var transferUtility = new TransferUtility(Client); + var response = await transferUtility.DownloadWithResponseAsync(downloadPath, bucketName, key); + + // Assert + Assert.IsNotNull(response, "Response should not be null"); + Assert.IsTrue(Directory.Exists(nestedDir), "Nested directory should be created"); + Assert.IsTrue(File.Exists(downloadPath), "Downloaded file should exist in nested directory"); + + var downloadedChecksum = CalculateFileChecksum(downloadPath); + Assert.AreEqual(expectedChecksum, downloadedChecksum, "Downloaded data checksum should match"); + + // Verify no temp files remain + VerifyNoTempFilesExist(downloadPath); + } + + [TestMethod] + [TestCategory("S3")] + [TestCategory("Download")] + [TestCategory("FileHandling")] + public async Task DownloadWithResponse_OverwritesExistingFile() + { + // Arrange + var objectSize = 5 * MB; + var (key, expectedChecksum) = await CreateTestObjectWithChecksum(objectSize); + var downloadPath = Path.Combine(tempDirectory, key); + + // Create an existing file with different content + var existingContent = new byte[1024]; + new Random().NextBytes(existingContent); + File.WriteAllBytes(downloadPath, existingContent); + + var existingChecksum = CalculateFileChecksum(downloadPath); + Assert.AreNotEqual(expectedChecksum, existingChecksum, "Existing file should have different content"); + + // Act + var transferUtility = new TransferUtility(Client); + var response = await transferUtility.DownloadWithResponseAsync(downloadPath, bucketName, key); + + // Assert + Assert.IsNotNull(response, "Response should not be null"); + Assert.IsTrue(File.Exists(downloadPath), "Downloaded file should exist"); + + var downloadedChecksum = CalculateFileChecksum(downloadPath); + Assert.AreEqual(expectedChecksum, downloadedChecksum, "Downloaded file should have new content"); + + var fileInfo = new FileInfo(downloadPath); + Assert.AreEqual(objectSize, fileInfo.Length, "Downloaded file size should match new content"); + + // Verify no temp files remain + VerifyNoTempFilesExist(downloadPath); + } + + #endregion + + #region Helper Methods + + /// + /// Creates a test object in S3 with the specified size and returns its key and checksum. 
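+        /// The checksum is the Base64-encoded MD5 of the generated file, computed
+        /// locally before upload (see CalculateFileChecksum below).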
+ /// + private static async Task<(string key, string checksum)> CreateTestObjectWithChecksum(long objectSize) + { + var key = UtilityMethods.GenerateName("download-test"); + var filePath = Path.Combine(Path.GetTempPath(), key); + UtilityMethods.GenerateFile(filePath, objectSize); + + // Calculate checksum before upload + var checksum = CalculateFileChecksum(filePath); + + await Client.PutObjectAsync(new PutObjectRequest + { + BucketName = bucketName, + Key = key, + FilePath = filePath + }); + + // Cleanup temp upload file + File.Delete(filePath); + + return (key, checksum); + } + + /// + /// Calculates the MD5 checksum of a file. + /// + private static string CalculateFileChecksum(string filePath) + { + using (var md5 = System.Security.Cryptography.MD5.Create()) + using (var stream = File.OpenRead(filePath)) + { + var hash = md5.ComputeHash(stream); + return Convert.ToBase64String(hash); + } + } + + /// + /// Validates that the response contains expected values. + /// + private static void ValidateResponse(TransferUtilityDownloadResponse response, long expectedSize) + { + Assert.IsNotNull(response.Headers, "Headers should not be null"); + Assert.AreEqual(expectedSize, response.Headers.ContentLength, "Content length should match"); + Assert.IsNotNull(response.ETag, "ETag should not be null"); + } + + /// + /// Verifies that no temporary files remain after download completion. + /// Temp files use the pattern: {originalPath}.s3tmp.{8-char-id} + /// + private static void VerifyNoTempFilesExist(string filePath) + { + var directory = Path.GetDirectoryName(filePath); + var fileName = Path.GetFileName(filePath); + + if (Directory.Exists(directory)) + { + var tempFiles = Directory.GetFiles(directory, fileName + ".s3tmp.*"); + Assert.AreEqual(0, tempFiles.Length, + $"No temporary files should remain. Found: {string.Join(", ", tempFiles)}"); + } + } + + #endregion + } +} diff --git a/sdk/test/Services/S3/IntegrationTests/TransferUtilityOpenStreamTests.cs b/sdk/test/Services/S3/IntegrationTests/TransferUtilityOpenStreamTests.cs new file mode 100644 index 000000000000..f463c4f7da57 --- /dev/null +++ b/sdk/test/Services/S3/IntegrationTests/TransferUtilityOpenStreamTests.cs @@ -0,0 +1,746 @@ +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Threading.Tasks; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Amazon.S3; +using Amazon.S3.Model; +using Amazon.S3.Transfer; +using Amazon.S3.Util; +using Amazon.Util; +using AWSSDK_DotNet.IntegrationTests.Utils; + +namespace AWSSDK_DotNet.IntegrationTests.Tests.S3 +{ + /// + /// Integration tests for TransferUtility.OpenStreamWithResponse functionality. + /// These tests verify end-to-end functionality with actual S3 operations. + /// + /// Most test scenarios (buffer sizes, part boundaries, stream behavior) are covered + /// in BufferedMultipartStreamTests.cs with mocked dependencies for faster execution. 
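+    /// As in the download-to-file tests above, expected checksums are computed locally
+    /// before upload and compared against the bytes read back from the response stream.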
+ /// + /// These integration tests focus on: + /// - Basic single-part downloads + /// - Basic multipart downloads + /// - Real S3 metadata preservation + /// + [TestClass] + public class TransferUtilityOpenStreamTests : TestBase + { + private static readonly long MB = 1024 * 1024; + private static string bucketName; + + [ClassInitialize()] + public static void ClassInitialize(TestContext testContext) + { + bucketName = S3TestUtils.CreateBucketWithWait(Client); + } + + [ClassCleanup] + public static void ClassCleanup() + { + AmazonS3Util.DeleteS3BucketWithObjects(Client, bucketName); + BaseClean(); + } + + #region Single-Part Tests + + [TestMethod] + [TestCategory("S3")] + [TestCategory("OpenStream")] + public async Task OpenStream_SinglePart_SmallObject() + { + // Arrange + var objectSize = 2 * MB; + var (key, expectedChecksum) = await CreateTestObjectWithChecksum(objectSize); + + // Act + var transferUtility = new TransferUtility(Client); + using (var response = await transferUtility.OpenStreamWithResponseAsync(bucketName, key)) + { + // Assert + Assert.IsNotNull(response); + Assert.IsNotNull(response.ResponseStream); + ValidateHeaders(response, objectSize); + + var downloadedBytes = await ReadStreamToByteArray(response.ResponseStream, objectSize, (int)(1 * MB)); + var actualChecksum = CalculateChecksum(downloadedBytes); + + Assert.AreEqual(expectedChecksum, actualChecksum, "Downloaded data checksum should match"); + Assert.AreEqual(objectSize, downloadedBytes.Length, "Downloaded size should match"); + } + } + + [TestMethod] + [TestCategory("S3")] + [TestCategory("OpenStream")] + public async Task OpenStream_SinglePart_EmptyObject() + { + // Arrange + var key = UtilityMethods.GenerateName("empty-object"); + await Client.PutObjectAsync(new PutObjectRequest + { + BucketName = bucketName, + Key = key, + ContentBody = "" + }); + + // Act + var transferUtility = new TransferUtility(Client); + using (var response = await transferUtility.OpenStreamWithResponseAsync(bucketName, key)) + { + // Assert + Assert.IsNotNull(response); + Assert.IsNotNull(response.ResponseStream); + Assert.AreEqual(0, response.Headers.ContentLength); + + var buffer = new byte[1024]; + var bytesRead = await response.ResponseStream.ReadAsync(buffer, 0, buffer.Length); + Assert.AreEqual(0, bytesRead, "Should read 0 bytes from empty object"); + } + } + + #endregion + + #region Multipart Test + + [TestMethod] + [TestCategory("S3")] + [TestCategory("OpenStream")] + [TestCategory("Multipart")] + public async Task OpenStream_Multipart_BasicDownload() + { + // Arrange - Simple multipart download to verify end-to-end S3 integration + var objectSize = 20 * MB; + var partSize = 8 * MB; + var key = UtilityMethods.GenerateName("openstream-test"); + var filePath = Path.Combine(Path.GetTempPath(), key); + UtilityMethods.GenerateFile(filePath, objectSize); + + // Calculate checksum before upload + var expectedChecksum = CalculateFileChecksum(filePath); + + // Upload using TransferUtility to ensure multipart upload + var uploadRequest = new TransferUtilityUploadRequest + { + BucketName = bucketName, + Key = key, + FilePath = filePath, + PartSize = partSize // Force multipart upload with explicit part size + }; + + var transferUtility = new TransferUtility(Client); + await transferUtility.UploadAsync(uploadRequest); + + // Verify object is multipart by checking PartsCount + // Note: PartsCount is only returned when PartNumber is specified in the request + var metadata = await Client.GetObjectMetadataAsync(new GetObjectMetadataRequest + { 
+ BucketName = bucketName, + Key = key, + PartNumber = 1 + }); + Assert.IsTrue(metadata.PartsCount > 1, "Object should be multipart to test multipart download"); + + var request = new TransferUtilityOpenStreamRequest + { + BucketName = bucketName, + Key = key, + PartSize = partSize + }; + + // Act + using (var response = await transferUtility.OpenStreamWithResponseAsync(request)) + { + // Assert + Assert.IsNotNull(response, "Response should not be null"); + Assert.IsNotNull(response.ResponseStream, "ResponseStream should not be null"); + ValidateHeaders(response, objectSize); + + var downloadedBytes = await ReadStreamToByteArray(response.ResponseStream, objectSize, (int)(2 * MB)); + var actualChecksum = CalculateChecksum(downloadedBytes); + + Assert.AreEqual(expectedChecksum, actualChecksum, "Downloaded data checksum should match"); + Assert.AreEqual(objectSize, downloadedBytes.Length, "Downloaded size should match"); + } + } + + [TestMethod] + [TestCategory("S3")] + [TestCategory("OpenStream")] + [TestCategory("Multipart")] + public async Task OpenStream_Multipart_RangeDownload() + { + // Arrange - Test RANGE-based multipart download with custom part size + var objectSize = 20 * MB; + var uploadPartSize = 8 * MB; // Upload with 8MB parts + var downloadPartSize = 6 * MB; // Download with different 6MB parts to test RANGE strategy + var key = UtilityMethods.GenerateName("openstream-range-test"); + var filePath = Path.Combine(Path.GetTempPath(), key); + UtilityMethods.GenerateFile(filePath, objectSize); + + // Calculate checksum before upload + var expectedChecksum = CalculateFileChecksum(filePath); + + // Upload using TransferUtility to ensure multipart upload + var uploadRequest = new TransferUtilityUploadRequest + { + BucketName = bucketName, + Key = key, + FilePath = filePath, + PartSize = uploadPartSize // Force multipart upload + }; + + var transferUtility = new TransferUtility(Client); + await transferUtility.UploadAsync(uploadRequest); + + // Verify object is multipart + var metadata = await Client.GetObjectMetadataAsync(new GetObjectMetadataRequest + { + BucketName = bucketName, + Key = key, + PartNumber = 1 + }); + Assert.IsTrue(metadata.PartsCount > 1, "Object should be multipart to test multipart download"); + + // Act - Download using RANGE strategy with different part size + var request = new TransferUtilityOpenStreamRequest + { + BucketName = bucketName, + Key = key, + MultipartDownloadType = MultipartDownloadType.RANGE, + PartSize = downloadPartSize // Use different part size than upload + }; + + using (var response = await transferUtility.OpenStreamWithResponseAsync(request)) + { + // Assert + Assert.IsNotNull(response, "Response should not be null"); + Assert.IsNotNull(response.ResponseStream, "ResponseStream should not be null"); + ValidateHeaders(response, objectSize); + + var downloadedBytes = await ReadStreamToByteArray(response.ResponseStream, objectSize, (int)(2 * MB)); + var actualChecksum = CalculateChecksum(downloadedBytes); + + Assert.AreEqual(expectedChecksum, actualChecksum, + "Downloaded data checksum should match (RANGE strategy)"); + Assert.AreEqual(objectSize, downloadedBytes.Length, + "Downloaded size should match (RANGE strategy)"); + } + } + + #endregion + + #region Checksum Tests + + [TestMethod] + [TestCategory("S3")] + [TestCategory("OpenStream")] + [TestCategory("Checksum")] + public async Task OpenStream_MultipartObjectWithChecksums_NullsCompositeChecksums() + { + // Arrange - Upload a multipart object with checksums + // Object must be > 16MB to 
trigger multipart upload with checksums + var objectSize = 20 * MB; + var key = UtilityMethods.GenerateName("composite-checksum-test"); + var filePath = Path.Combine(Path.GetTempPath(), key); + UtilityMethods.GenerateFile(filePath, objectSize); + + // Upload with checksum algorithm to create composite checksum + var uploadRequest = new TransferUtilityUploadRequest + { + BucketName = bucketName, + Key = key, + FilePath = filePath, + ChecksumAlgorithm = ChecksumAlgorithm.CRC32, + PartSize = 8 * MB + }; + + var uploadUtility = new TransferUtility(Client); + await uploadUtility.UploadAsync(uploadRequest); + + // Verify object is multipart by checking PartsCount + // Note: PartsCount is only returned when PartNumber is specified in the request + var metadata = await Client.GetObjectMetadataAsync(new GetObjectMetadataRequest + { + BucketName = bucketName, + Key = key, + PartNumber = 1 + }); + Assert.IsTrue(metadata.PartsCount > 1, "Object should be multipart to test composite checksums"); + + // Act - Download with ChecksumMode enabled + var openStreamRequest = new TransferUtilityOpenStreamRequest + { + BucketName = bucketName, + Key = key, + ChecksumMode = ChecksumMode.ENABLED + }; + + using (var response = await uploadUtility.OpenStreamWithResponseAsync(openStreamRequest)) + { + // Assert - Verify ChecksumType is COMPOSITE + Assert.AreEqual(ChecksumType.COMPOSITE, response.ChecksumType, + "ChecksumType should be COMPOSITE for multipart objects"); + + // Per spec: "If ChecksumType is COMPOSITE, set all checksum value members to null + // as the checksum value returned from a part GET request is not the composite + // checksum for the entire object" + Assert.IsNull(response.ChecksumCRC32, "ChecksumCRC32 should be null for composite checksums"); + Assert.IsNull(response.ChecksumCRC32C, "ChecksumCRC32C should be null for composite checksums"); + Assert.IsNull(response.ChecksumCRC64NVME, "ChecksumCRC64NVME should be null for composite checksums"); + Assert.IsNull(response.ChecksumSHA1, "ChecksumSHA1 should be null for composite checksums"); + Assert.IsNull(response.ChecksumSHA256, "ChecksumSHA256 should be null for composite checksums"); + + // Verify other response properties are still populated correctly + Assert.IsNotNull(response.ETag, "ETag should still be populated"); + Assert.IsTrue(response.Headers.ContentLength > 0, "ContentLength should be populated"); + Assert.IsNotNull(response.ResponseStream, "ResponseStream should be available"); + + // Verify we can still read the stream + var buffer = new byte[1024]; + var bytesRead = await response.ResponseStream.ReadAsync(buffer, 0, buffer.Length); + Assert.IsTrue(bytesRead > 0, "Should be able to read from stream despite null checksums"); + } + } + + #endregion + + #region Metadata Validation Tests + + [TestMethod] + [TestCategory("S3")] + [TestCategory("OpenStream")] + [TestCategory("Metadata")] + public async Task OpenStream_PreservesMetadata() + { + // Arrange + var objectSize = 10 * MB; + var key = UtilityMethods.GenerateName("metadata-test"); + var filePath = Path.Combine(Path.GetTempPath(), key); + UtilityMethods.GenerateFile(filePath, objectSize); + + var putRequest = new PutObjectRequest + { + BucketName = bucketName, + Key = key, + FilePath = filePath, + ContentType = "application/octet-stream" + }; + putRequest.Metadata.Add("test-key", "test-value"); + putRequest.Metadata.Add("custom-header", "custom-value"); + + await Client.PutObjectAsync(putRequest); + + // Act + var transferUtility = new TransferUtility(Client); + using (var 
response = await transferUtility.OpenStreamWithResponseAsync(bucketName, key)) + { + // Assert + Assert.IsNotNull(response); + Assert.AreEqual("application/octet-stream", response.Headers.ContentType); + + // S3 automatically prefixes user-defined metadata with "x-amz-meta-" + Assert.IsTrue(response.Metadata.Keys.Contains("x-amz-meta-test-key"), + "Metadata should contain 'x-amz-meta-test-key'"); + Assert.AreEqual("test-value", response.Metadata["x-amz-meta-test-key"]); + + Assert.IsTrue(response.Metadata.Keys.Contains("x-amz-meta-custom-header"), + "Metadata should contain 'x-amz-meta-custom-header'"); + Assert.AreEqual("custom-value", response.Metadata["x-amz-meta-custom-header"]); + } + } + + [TestMethod] + [TestCategory("S3")] + [TestCategory("OpenStream")] + [TestCategory("Metadata")] + public async Task OpenStream_PreservesETag() + { + // Arrange + var objectSize = 15 * MB; + var key = UtilityMethods.GenerateName("etag-test"); + var filePath = Path.Combine(Path.GetTempPath(), key); + UtilityMethods.GenerateFile(filePath, objectSize); + + await Client.PutObjectAsync(new PutObjectRequest + { + BucketName = bucketName, + Key = key, + FilePath = filePath + }); + + var metadata = await Client.GetObjectMetadataAsync(new GetObjectMetadataRequest + { + BucketName = bucketName, + Key = key + }); + var expectedETag = metadata.ETag; + + // Act + var transferUtility = new TransferUtility(Client); + using (var response = await transferUtility.OpenStreamWithResponseAsync(bucketName, key)) + { + // Assert + Assert.IsNotNull(response.ETag); + Assert.AreEqual(expectedETag, response.ETag); + } + } + + #endregion + + #region MaxInMemoryParts Tests + + [TestMethod] + [TestCategory("S3")] + [TestCategory("OpenStream")] + [TestCategory("MaxInMemoryParts")] + [TestCategory("Multipart")] + public async Task OpenStream_WithCustomMaxInMemoryParts_DownloadsSuccessfully() + { + // Arrange - Upload as multipart to test MaxInMemoryParts buffering + var objectSize = 32 * MB; + var uploadPartSize = 8 * MB; // Force multipart upload with 4 parts + var downloadPartSize = 8 * MB; + var maxInMemoryParts = 2; // Only buffer 2 parts in memory at once + var key = UtilityMethods.GenerateName("maxinmemory-test"); + var filePath = Path.Combine(Path.GetTempPath(), key); + UtilityMethods.GenerateFile(filePath, objectSize); + + // Calculate checksum before upload + var expectedChecksum = CalculateFileChecksum(filePath); + + // Upload using TransferUtility to ensure multipart upload + var uploadRequest = new TransferUtilityUploadRequest + { + BucketName = bucketName, + Key = key, + FilePath = filePath, + PartSize = uploadPartSize // Force multipart upload + }; + + var transferUtility = new TransferUtility(Client); + await transferUtility.UploadAsync(uploadRequest); + + // Verify object is multipart + var metadata = await Client.GetObjectMetadataAsync(new GetObjectMetadataRequest + { + BucketName = bucketName, + Key = key, + PartNumber = 1 + }); + Assert.IsTrue(metadata.PartsCount > 1, "Object should be multipart to test MaxInMemoryParts"); + + var downloadRequest = new TransferUtilityOpenStreamRequest + { + BucketName = bucketName, + Key = key, + PartSize = downloadPartSize, + MaxInMemoryParts = maxInMemoryParts + }; + + // Act + using (var response = await transferUtility.OpenStreamWithResponseAsync(downloadRequest)) + { + // Assert + Assert.IsNotNull(response, "Response should not be null"); + Assert.IsNotNull(response.ResponseStream, "ResponseStream should not be null"); + ValidateHeaders(response, objectSize); + + var 
downloadedBytes = await ReadStreamToByteArray(response.ResponseStream, objectSize, (int)(2 * MB)); + var actualChecksum = CalculateChecksum(downloadedBytes); + + Assert.AreEqual(expectedChecksum, actualChecksum, + "Downloaded data checksum should match with custom MaxInMemoryParts"); + Assert.AreEqual(objectSize, downloadedBytes.Length, + "Downloaded size should match with custom MaxInMemoryParts"); + } + } + + [TestMethod] + [TestCategory("S3")] + [TestCategory("OpenStream")] + [TestCategory("MaxInMemoryParts")] + [TestCategory("Multipart")] + public async Task OpenStream_WithDefaultMaxInMemoryParts_DownloadsSuccessfully() + { + // Arrange - Upload as multipart, download without specifying MaxInMemoryParts + var objectSize = 24 * MB; + var uploadPartSize = 8 * MB; + var downloadPartSize = 8 * MB; + var key = UtilityMethods.GenerateName("default-maxinmemory-test"); + var filePath = Path.Combine(Path.GetTempPath(), key); + UtilityMethods.GenerateFile(filePath, objectSize); + + // Calculate checksum before upload + var expectedChecksum = CalculateFileChecksum(filePath); + + // Upload using TransferUtility to ensure multipart upload + var uploadRequest = new TransferUtilityUploadRequest + { + BucketName = bucketName, + Key = key, + FilePath = filePath, + PartSize = uploadPartSize + }; + + var transferUtility = new TransferUtility(Client); + await transferUtility.UploadAsync(uploadRequest); + + // Verify object is multipart + var metadata = await Client.GetObjectMetadataAsync(new GetObjectMetadataRequest + { + BucketName = bucketName, + Key = key, + PartNumber = 1 + }); + Assert.IsTrue(metadata.PartsCount > 1, "Object should be multipart"); + + var downloadRequest = new TransferUtilityOpenStreamRequest + { + BucketName = bucketName, + Key = key, + PartSize = downloadPartSize + // MaxInMemoryParts not specified - should use default (1024) + }; + + // Act + using (var response = await transferUtility.OpenStreamWithResponseAsync(downloadRequest)) + { + // Assert + Assert.IsNotNull(response); + Assert.IsNotNull(response.ResponseStream); + ValidateHeaders(response, objectSize); + + var downloadedBytes = await ReadStreamToByteArray(response.ResponseStream, objectSize, (int)(2 * MB)); + var actualChecksum = CalculateChecksum(downloadedBytes); + + Assert.AreEqual(expectedChecksum, actualChecksum, + "Downloaded data checksum should match with default MaxInMemoryParts"); + Assert.AreEqual(objectSize, downloadedBytes.Length, + "Downloaded size should match with default MaxInMemoryParts"); + } + } + + [DataTestMethod] + [TestCategory("S3")] + [TestCategory("OpenStream")] + [TestCategory("MaxInMemoryParts")] + [TestCategory("Multipart")] + [DataRow(1, DisplayName = "MaxInMemoryParts = 1 (minimal buffering)")] + [DataRow(2, DisplayName = "MaxInMemoryParts = 2")] + [DataRow(4, DisplayName = "MaxInMemoryParts = 4")] + [DataRow(10, DisplayName = "MaxInMemoryParts = 10")] + public async Task OpenStream_WithVariousMaxInMemoryParts_DownloadsSuccessfully(int maxInMemoryParts) + { + // Arrange - Upload as multipart, test various MaxInMemoryParts values + var objectSize = 24 * MB; + var uploadPartSize = 8 * MB; // Creates 3 parts + var downloadPartSize = 8 * MB; + var key = UtilityMethods.GenerateName($"maxinmemory-{maxInMemoryParts}-test"); + var filePath = Path.Combine(Path.GetTempPath(), key); + UtilityMethods.GenerateFile(filePath, objectSize); + + // Calculate checksum before upload + var expectedChecksum = CalculateFileChecksum(filePath); + + // Upload using TransferUtility to ensure multipart upload + var 
uploadRequest = new TransferUtilityUploadRequest + { + BucketName = bucketName, + Key = key, + FilePath = filePath, + PartSize = uploadPartSize + }; + + var transferUtility = new TransferUtility(Client); + await transferUtility.UploadAsync(uploadRequest); + + // Verify object is multipart + var metadata = await Client.GetObjectMetadataAsync(new GetObjectMetadataRequest + { + BucketName = bucketName, + Key = key, + PartNumber = 1 + }); + Assert.IsTrue(metadata.PartsCount > 1, "Object should be multipart"); + + var downloadRequest = new TransferUtilityOpenStreamRequest + { + BucketName = bucketName, + Key = key, + PartSize = downloadPartSize, + MaxInMemoryParts = maxInMemoryParts + }; + + // Act + using (var response = await transferUtility.OpenStreamWithResponseAsync(downloadRequest)) + { + // Assert + Assert.IsNotNull(response, $"Response should not be null with MaxInMemoryParts={maxInMemoryParts}"); + Assert.IsNotNull(response.ResponseStream, + $"ResponseStream should not be null with MaxInMemoryParts={maxInMemoryParts}"); + + var downloadedBytes = await ReadStreamToByteArray(response.ResponseStream, objectSize, (int)(2 * MB)); + var actualChecksum = CalculateChecksum(downloadedBytes); + + Assert.AreEqual(expectedChecksum, actualChecksum, + $"Downloaded data checksum should match with MaxInMemoryParts={maxInMemoryParts}"); + Assert.AreEqual(objectSize, downloadedBytes.Length, + $"Downloaded size should match with MaxInMemoryParts={maxInMemoryParts}"); + } + } + + [TestMethod] + [TestCategory("S3")] + [TestCategory("OpenStream")] + [TestCategory("MaxInMemoryParts")] + [TestCategory("Multipart")] + public async Task OpenStream_LargeObjectWithSmallMaxInMemoryParts_DownloadsSuccessfully() + { + // Arrange - Test memory-constrained scenario with large object + // This simulates downloading a large file while limiting memory usage + var objectSize = 40 * MB; + var uploadPartSize = 8 * MB; // Creates 5 parts + var downloadPartSize = 8 * MB; + var maxInMemoryParts = 2; // Only buffer 2 parts (16MB) instead of all 5 (40MB) + var key = UtilityMethods.GenerateName("large-maxinmemory-test"); + var filePath = Path.Combine(Path.GetTempPath(), key); + UtilityMethods.GenerateFile(filePath, objectSize); + + // Calculate checksum before upload + var expectedChecksum = CalculateFileChecksum(filePath); + + // Upload using TransferUtility to ensure multipart upload + var uploadRequest = new TransferUtilityUploadRequest + { + BucketName = bucketName, + Key = key, + FilePath = filePath, + PartSize = uploadPartSize + }; + + var transferUtility = new TransferUtility(Client); + await transferUtility.UploadAsync(uploadRequest); + + // Verify object is multipart + var metadata = await Client.GetObjectMetadataAsync(new GetObjectMetadataRequest + { + BucketName = bucketName, + Key = key, + PartNumber = 1 + }); + Assert.IsTrue(metadata.PartsCount > 1, "Object should be multipart"); + + var downloadRequest = new TransferUtilityOpenStreamRequest + { + BucketName = bucketName, + Key = key, + PartSize = downloadPartSize, + MaxInMemoryParts = maxInMemoryParts + }; + + // Act + using (var response = await transferUtility.OpenStreamWithResponseAsync(downloadRequest)) + { + // Assert + Assert.IsNotNull(response); + Assert.IsNotNull(response.ResponseStream); + ValidateHeaders(response, objectSize); + + // Read in smaller chunks to simulate streaming consumption + var downloadedBytes = await ReadStreamToByteArray(response.ResponseStream, objectSize, (int)(1 * MB)); + var actualChecksum = CalculateChecksum(downloadedBytes); + + 
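// ---- Editorial aside (illustrative sketch, not part of this patch) ----
// The property these MaxInMemoryParts tests exercise bounds the reader's worst-case
// resident memory at roughly PartSize * MaxInMemoryParts. A minimal sketch of that
// arithmetic; the helper name is invented for illustration:
static long BufferCeilingBytes(long partSize, int maxInMemoryParts)
{
    // e.g. 8 MB parts with MaxInMemoryParts = 2 keeps at most ~16 MB buffered,
    // rather than the full 40 MB object used in the test above.
    return partSize * maxInMemoryParts;
}
// ---- End editorial aside ----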
Assert.AreEqual(expectedChecksum, actualChecksum, + "Large object should download correctly with limited MaxInMemoryParts"); + Assert.AreEqual(objectSize, downloadedBytes.Length, + "Downloaded size should match for large object with limited MaxInMemoryParts"); + } + } + + #endregion + + #region Helper Methods + + /// + /// Creates a test object in S3 with the specified size and returns its key and checksum. + /// + private static async Task<(string key, string checksum)> CreateTestObjectWithChecksum(long objectSize) + { + var key = UtilityMethods.GenerateName("openstream-test"); + var filePath = Path.Combine(Path.GetTempPath(), key); + UtilityMethods.GenerateFile(filePath, objectSize); + + // Calculate checksum before upload + var checksum = CalculateFileChecksum(filePath); + + await Client.PutObjectAsync(new PutObjectRequest + { + BucketName = bucketName, + Key = key, + FilePath = filePath + }); + + return (key, checksum); + } + + /// + /// Calculates the MD5 checksum of a file. + /// + private static string CalculateFileChecksum(string filePath) + { + using (var md5 = System.Security.Cryptography.MD5.Create()) + using (var stream = File.OpenRead(filePath)) + { + var hash = md5.ComputeHash(stream); + return Convert.ToBase64String(hash); + } + } + + /// + /// Validates that the response headers contain expected values. + /// + private static void ValidateHeaders(TransferUtilityOpenStreamResponse response, long expectedSize) + { + Assert.IsNotNull(response.Headers, "Headers should not be null"); + Assert.AreEqual(expectedSize, response.Headers.ContentLength, "Content length should match"); + Assert.IsNotNull(response.ETag, "ETag should not be null"); + } + + /// + /// Reads a stream completely into a byte array using the specified buffer size. + /// + private static async Task<byte[]> ReadStreamToByteArray(Stream stream, long totalSize, int bufferSize) + { + var result = new byte[totalSize]; + var buffer = new byte[bufferSize]; + long totalRead = 0; + + int bytesRead; + while ((bytesRead = await stream.ReadAsync(buffer, 0, buffer.Length)) > 0) + { + Array.Copy(buffer, 0, result, totalRead, bytesRead); + totalRead += bytesRead; + } + + Assert.AreEqual(totalSize, totalRead, "Should read expected number of bytes"); + return result; + } + + /// + /// Calculates the MD5 checksum of a byte array.
+ /// + private static string CalculateChecksum(byte[] data) + { + using (var md5 = System.Security.Cryptography.MD5.Create()) + { + var hash = md5.ComputeHash(data); + return Convert.ToBase64String(hash); + } + } + + #endregion + } +} diff --git a/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs b/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs index 856742c76a02..bdc72ecdcea4 100644 --- a/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs +++ b/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs @@ -13,6 +13,7 @@ using Amazon.Util; using System.Net.Mime; using System.Runtime.InteropServices.ComTypes; +using System.Threading.Tasks; namespace AWSSDK_DotNet.IntegrationTests.Tests.S3 { @@ -105,6 +106,113 @@ public void SimpleUploadProgressTest() progressValidator.AssertOnCompletion(); } + [TestMethod] + [TestCategory("S3")] + public void SimpleUploadInitiatedEventTest() + { + var fileName = UtilityMethods.GenerateName(@"SimpleUploadTest\InitiatedEvent"); + var eventValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.IsTrue(args.TotalBytes > 0); + Assert.AreEqual(10 * MEG_SIZE, args.TotalBytes); + Assert.AreEqual(args.FilePath, Path.Combine(BasePath, fileName)); + } + }; + UploadWithLifecycleEvents(fileName, 10 * MEG_SIZE, eventValidator, null, null); + eventValidator.AssertEventFired(); + } + + [TestMethod] + [TestCategory("S3")] + public void SimpleUploadCompletedEventTest() + { + var fileName = UtilityMethods.GenerateName(@"SimpleUploadTest\CompletedEvent"); + var eventValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.IsNotNull(args.Response); + Assert.AreEqual(args.TransferredBytes, args.TotalBytes); + Assert.AreEqual(10 * MEG_SIZE, args.TotalBytes); + Assert.IsTrue(!string.IsNullOrEmpty(args.Response.ETag)); + Assert.AreEqual(args.FilePath, Path.Combine(BasePath, fileName)); + } + }; + UploadWithLifecycleEvents(fileName, 10 * MEG_SIZE, null, eventValidator, null); + eventValidator.AssertEventFired(); + } + + [TestMethod] + [TestCategory("S3")] + public void SimpleUploadFailedEventTest() + { + var fileName = UtilityMethods.GenerateName(@"SimpleUploadTest\FailedEvent"); + var eventValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.IsTrue(args.TotalBytes > 0); + Assert.AreEqual(5 * MEG_SIZE, args.TotalBytes); + Assert.AreEqual(args.FilePath, Path.Combine(BasePath, fileName)); + // For failed uploads, transferred bytes should be less than or equal to total bytes + Assert.IsTrue(args.TransferredBytes <= args.TotalBytes); + } + }; + + // Use invalid bucket name to force failure + var invalidBucketName = "invalid-bucket-name-" + Guid.NewGuid().ToString(); + + try + { + UploadWithLifecycleEventsAndBucket(fileName, 5 * MEG_SIZE, invalidBucketName, null, null, eventValidator); + Assert.Fail("Expected an exception to be thrown for invalid bucket"); + } + catch (AmazonS3Exception) + { + // Expected exception - the failed event should have been fired + eventValidator.AssertEventFired(); + } + } + + [TestMethod] + [TestCategory("S3")] + public void SimpleUploadCompleteLifecycleTest() + { + var fileName = UtilityMethods.GenerateName(@"SimpleUploadTest\CompleteLifecycle"); + + var initiatedValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.AreEqual(8 * MEG_SIZE, 
args.TotalBytes); + Assert.AreEqual(args.FilePath, Path.Combine(BasePath, fileName)); + } + }; + + var completedValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.IsNotNull(args.Response); + Assert.AreEqual(args.TransferredBytes, args.TotalBytes); + Assert.AreEqual(8 * MEG_SIZE, args.TotalBytes); + Assert.AreEqual(args.FilePath, Path.Combine(BasePath, fileName)); + } + }; + + UploadWithLifecycleEvents(fileName, 8 * MEG_SIZE, initiatedValidator, completedValidator, null); + + initiatedValidator.AssertEventFired(); + completedValidator.AssertEventFired(); + } + [TestMethod] [TestCategory("S3")] public void SimpleUpload() @@ -375,6 +483,49 @@ public void UploadUnseekableStreamFileSizeBetweenMinPartSizeAndPartBufferSize() } } + [TestMethod] + [TestCategory("S3")] + public void SimpleUploadProgressTotalBytesTest() + { + var fileName = UtilityMethods.GenerateName(@"SimpleUploadProgressTotalBytes\TestFile"); + var filePath = Path.Combine(BasePath, fileName); + var fileSize = 10 * MEG_SIZE; + + // Create test file + UtilityMethods.GenerateFile(filePath, fileSize); + + var transferConfig = new TransferUtilityConfig() + { + MinSizeBeforePartUpload = 20 * MEG_SIZE, + }; + + var progressValidator = new TransferProgressValidator + { + Validate = (progress) => + { + Assert.IsTrue(progress.TotalBytes > 0, "TotalBytes should be greater than 0"); + Assert.AreEqual(fileSize, progress.TotalBytes, "TotalBytes should equal file size"); + Assert.AreEqual(filePath, progress.FilePath, "FilePath should match expected path"); + } + }; + + using (var fileTransferUtility = new TransferUtility(Client, transferConfig)) + { + var request = new TransferUtilityUploadRequest() + { + BucketName = bucketName, + FilePath = filePath, + Key = fileName + }; + + request.UploadProgressEvent += progressValidator.OnProgressEvent; + + fileTransferUtility.Upload(request); + + progressValidator.AssertOnCompletion(); + } + } + [TestMethod] [TestCategory("S3")] public void UploadUnSeekableStreamWithZeroLengthTest() @@ -620,6 +771,210 @@ public void MultipartUploadProgressTest() } } + [TestMethod] + [TestCategory("S3")] + public void MultipartUploadInitiatedEventTest() + { + var fileName = UtilityMethods.GenerateName(@"MultipartUploadTest\InitiatedEvent"); + var eventValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.IsTrue(args.TotalBytes > 0); + Assert.AreEqual(20 * MEG_SIZE, args.TotalBytes); + Assert.AreEqual(args.FilePath, Path.Combine(BasePath, fileName)); + } + }; + // Use 20MB+ to trigger multipart upload + UploadWithLifecycleEvents(fileName, 20 * MEG_SIZE, eventValidator, null, null); + eventValidator.AssertEventFired(); + } + + [TestMethod] + [TestCategory("S3")] + public void MultipartUploadCompletedEventTest() + { + var fileName = UtilityMethods.GenerateName(@"MultipartUploadTest\CompletedEvent"); + var eventValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.IsNotNull(args.Response); + Assert.AreEqual(args.TransferredBytes, args.TotalBytes); + Assert.AreEqual(25 * MEG_SIZE, args.TotalBytes); + Assert.IsTrue(!string.IsNullOrEmpty(args.Response.ETag)); + Assert.AreEqual(args.FilePath, Path.Combine(BasePath, fileName)); + } + }; + // Use 25MB to trigger multipart upload + UploadWithLifecycleEvents(fileName, 25 * MEG_SIZE, null, eventValidator, null); + eventValidator.AssertEventFired(); + } + + [TestMethod] + 
[TestCategory("S3")] + public void MultipartUploadFailedEventTest() + { + var fileName = UtilityMethods.GenerateName(@"MultipartUploadTest\FailedEvent"); + var eventValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.IsTrue(args.TotalBytes > 0); + Assert.AreEqual(22 * MEG_SIZE, args.TotalBytes); + Assert.AreEqual(args.FilePath, Path.Combine(BasePath, fileName)); + // For failed uploads, transferred bytes should be less than or equal to total bytes + Assert.IsTrue(args.TransferredBytes <= args.TotalBytes); + } + }; + + // Use invalid bucket name to force failure with multipart upload size + var invalidBucketName = "invalid-bucket-name-" + Guid.NewGuid().ToString(); + + try + { + // Use 22MB to trigger multipart upload + UploadWithLifecycleEventsAndBucket(fileName, 22 * MEG_SIZE, invalidBucketName, null, null, eventValidator); + Assert.Fail("Expected an exception to be thrown for invalid bucket"); + } + catch (AmazonS3Exception) + { + // Expected exception - the failed event should have been fired + eventValidator.AssertEventFired(); + } + } + + [TestMethod] + [TestCategory("S3")] + public void MultipartUploadCompleteLifecycleTest() + { + var fileName = UtilityMethods.GenerateName(@"MultipartUploadTest\CompleteLifecycle"); + + var initiatedValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.AreEqual(30 * MEG_SIZE, args.TotalBytes); + Assert.AreEqual(args.FilePath, Path.Combine(BasePath, fileName)); + } + }; + + var completedValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.IsNotNull(args.Response); + Assert.AreEqual(args.TransferredBytes, args.TotalBytes); + Assert.AreEqual(30 * MEG_SIZE, args.TotalBytes); + Assert.AreEqual(args.FilePath, Path.Combine(BasePath, fileName)); + } + }; + + // Use 30MB to trigger multipart upload + UploadWithLifecycleEvents(fileName, 30 * MEG_SIZE, initiatedValidator, completedValidator, null); + + initiatedValidator.AssertEventFired(); + completedValidator.AssertEventFired(); + } + + [TestMethod] + [TestCategory("S3")] + public void MultipartUploadUnseekableStreamInitiatedEventTest() + { + var fileName = UtilityMethods.GenerateName(@"MultipartUploadTest\UnseekableStreamInitiatedEvent"); + var eventValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.AreEqual(-1, args.TotalBytes); // Unseekable streams have unknown length + } + }; + UploadUnseekableStreamWithLifecycleEvents(20 * MEG_SIZE, eventValidator, null, null); + eventValidator.AssertEventFired(); + } + + [TestMethod] + [TestCategory("S3")] + public void MultipartUploadUnseekableStreamCompletedEventTest() + { + var eventValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.IsNotNull(args.Response); + Assert.AreEqual(-1, args.TotalBytes); // Unseekable streams have unknown length + Assert.AreEqual(0, args.TransferredBytes); // unseekable streams we dont attach and progress listeners so we wont have transferredbytes. 
+ } + }; + UploadUnseekableStreamWithLifecycleEvents(20 * MEG_SIZE, null, eventValidator, null); + eventValidator.AssertEventFired(); + } + + [TestMethod] + [TestCategory("S3")] + public void MultipartUploadUnseekableStreamFailedEventTest() + { + var eventValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.AreEqual(-1, args.TotalBytes); // Unseekable streams have unknown length + } + }; + + // Use invalid bucket name to force failure with multipart upload size + var invalidBucketName = "invalid-bucket-name-" + Guid.NewGuid().ToString(); + + try + { + UploadUnseekableStreamWithLifecycleEventsAndBucket(20 * MEG_SIZE, invalidBucketName, null, null, eventValidator); + Assert.Fail("Expected an exception to be thrown for invalid bucket"); + } + catch (AmazonS3Exception) + { + // Expected exception - the failed event should have been fired + eventValidator.AssertEventFired(); + } + } + + [TestMethod] + [TestCategory("S3")] + public void MultipartUploadUnseekableStreamCompleteLifecycleTest() + { + var initiatedValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.AreEqual(-1, args.TotalBytes); // Unseekable streams have unknown length + } + }; + + var completedValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.IsNotNull(args.Response); + Assert.AreEqual(-1, args.TotalBytes); // Unseekable streams have unknown length + Assert.AreEqual(0, args.TransferredBytes); // For unseekable streams we don't attach progress listeners, so TransferredBytes stays 0. + } + }; + + UploadUnseekableStreamWithLifecycleEvents(18 * MEG_SIZE, initiatedValidator, completedValidator, null); + + initiatedValidator.AssertEventFired(); + completedValidator.AssertEventFired(); + } + [TestMethod] + [TestCategory("S3")] + public void MultipartGetNumberTest() @@ -965,62 +1320,323 @@ public void DownloadProgressZeroLengthFileTest() progressValidator.AssertOnCompletion(); } - void Download(string fileName, long size, TransferProgressValidator progressValidator) + [TestMethod] + [TestCategory("S3")] + public void SimpleDownloadInitiatedEventTest() { - var key = fileName; - var originalFilePath = Path.Combine(BasePath, fileName); - UtilityMethods.GenerateFile(originalFilePath, size); - - Client.PutObject(new PutObjectRequest + var fileName = UtilityMethods.GenerateName(@"SimpleDownloadTest\InitiatedEvent"); + var eventValidator = new TransferLifecycleEventValidator { - BucketName = bucketName, - Key = key, - FilePath = originalFilePath - }); + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.AreEqual(args.FilePath, Path.Combine(BasePath, fileName + ".download")); + // Note: DownloadInitiatedEventArgs does not have TotalBytes since the object size is not known until the GetObjectResponse is received + } + }; + DownloadWithLifecycleEvents(fileName, 10 * MEG_SIZE, eventValidator, null, null); + eventValidator.AssertEventFired(); + } - var downloadedFilePath = originalFilePath + ".dn"; + [TestMethod] + [TestCategory("S3")] + public void SimpleDownloadCompletedEventTest() + { + var fileName = UtilityMethods.GenerateName(@"SimpleDownloadTest\CompletedEvent"); + var eventValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.IsNotNull(args.Response); + Assert.AreEqual(args.TransferredBytes, args.TotalBytes); + Assert.AreEqual(10 * MEG_SIZE, args.TotalBytes); +
Assert.IsTrue(!string.IsNullOrEmpty(args.Response.ETag)); + Assert.AreEqual(args.FilePath, Path.Combine(BasePath, fileName + ".download")); + } + }; + DownloadWithLifecycleEvents(fileName, 10 * MEG_SIZE, null, eventValidator, null); + eventValidator.AssertEventFired(); + } - var transferUtility = new TransferUtility(Client); - var request = new TransferUtilityDownloadRequest + [TestMethod] + [TestCategory("S3")] + public void SimpleDownloadFailedEventTest() + { + var fileName = UtilityMethods.GenerateName(@"SimpleDownloadTest\FailedEvent"); + var eventValidator = new TransferLifecycleEventValidator { - BucketName = bucketName, - FilePath = downloadedFilePath, - Key = key + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.AreEqual(args.FilePath, Path.Combine(BasePath, fileName + ".download")); + + // Non-existent key should always be early failure with unknown total bytes + Assert.AreEqual(-1, args.TotalBytes, "Non-existent key should result in TotalBytes = -1"); + Assert.AreEqual(0, args.TransferredBytes, "No bytes should be transferred for non-existent key"); + } }; - if (progressValidator != null) + + // Use non-existent key to force failure + var nonExistentKey = "non-existent-key-" + Guid.NewGuid().ToString(); + + try { - request.WriteObjectProgressEvent += progressValidator.OnProgressEvent; + DownloadWithLifecycleEventsAndKey(fileName, nonExistentKey, null, null, eventValidator); + Assert.Fail("Expected an exception to be thrown for non-existent key"); } - transferUtility.Download(request); + catch (AmazonS3Exception) + { + // Expected exception - the failed event should have been fired + eventValidator.AssertEventFired(); + } + } - UtilityMethods.CompareFiles(originalFilePath, downloadedFilePath); + [TestMethod] + [TestCategory("S3")] + public void SimpleDownloadCompleteLifecycleTest() + { + var fileName = UtilityMethods.GenerateName(@"SimpleDownloadTest\CompleteLifecycle"); + + var initiatedValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.AreEqual(args.FilePath, Path.Combine(BasePath, fileName + ".download")); + // Note: DownloadInitiatedEventArgs does not have TotalBytes since we don't know the size until GetObjectResponse + } + }; + + var completedValidator = new TransferLifecycleEventValidator + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.IsNotNull(args.Response); + Assert.AreEqual(args.TransferredBytes, args.TotalBytes); + Assert.AreEqual(8 * MEG_SIZE, args.TotalBytes); + Assert.AreEqual(args.FilePath, Path.Combine(BasePath, fileName + ".download")); + } + }; + + DownloadWithLifecycleEvents(fileName, 8 * MEG_SIZE, initiatedValidator, completedValidator, null); + + initiatedValidator.AssertEventFired(); + completedValidator.AssertEventFired(); } [TestMethod] [TestCategory("S3")] - public void OpenStreamTest() + public async Task MultipartDownloadProgressTest() { - var fileName = UtilityMethods.GenerateName(@"OpenStreamTest\File"); - var key = fileName; + var fileName = UtilityMethods.GenerateName("MultipartDownloadProgress"); var originalFilePath = Path.Combine(BasePath, fileName); - UtilityMethods.GenerateFile(originalFilePath, 2 * MEG_SIZE); - Client.PutObject(new PutObjectRequest + var downloadedFilePath = originalFilePath + ".dn"; + + // Upload a large file (20MB to ensure multipart) + UtilityMethods.GenerateFile(originalFilePath, 20 * MEG_SIZE); + await Client.PutObjectAsync(new PutObjectRequest { BucketName = bucketName, - Key = key, + Key = fileName, 
FilePath = originalFilePath }); - var transferUtility = new TransferUtility(Client); - var stream = transferUtility.OpenStream(bucketName, key); - Assert.IsNotNull(stream); - Assert.IsTrue(stream.CanRead); - stream.Close(); - } + int inProgressEventCount = 0; + int completedEventCount = 0; + long lastTransferredBytes = 0; - /// - /// Partial download resumption support can erroneously trigger retry with - /// byte range of 0 to Long.MaxValue if a zero length object is the first object + var progressValidator = new TransferProgressValidator + { + ValidateProgressInterval = true, // Enable interval validation to ensure events fire + Validate = (p) => + { + Assert.AreEqual(bucketName, p.BucketName); + Assert.AreEqual(fileName, p.Key); + Assert.IsNotNull(p.FilePath); + Assert.IsTrue(p.TransferredBytes >= lastTransferredBytes); + + if (p.IsCompleted) + { + completedEventCount++; + Assert.AreEqual(p.TotalBytes, p.TransferredBytes); + } + else + { + inProgressEventCount++; + Assert.IsTrue(p.TransferredBytes < p.TotalBytes); + } + + lastTransferredBytes = p.TransferredBytes; + } + }; + + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityDownloadRequest + { + BucketName = bucketName, + FilePath = downloadedFilePath, + Key = fileName + }; + request.WriteObjectProgressEvent += progressValidator.OnProgressEvent; + + // Use DownloadWithResponseAsync to trigger MultipartDownloadCommand + var response = await transferUtility.DownloadWithResponseAsync(request); + + progressValidator.AssertOnCompletion(); + + // Validate that in-progress events actually fired during the download + Assert.IsTrue(inProgressEventCount > 0, + $"Expected in-progress events to fire during multipart download, but got {inProgressEventCount}"); + Assert.AreEqual(1, completedEventCount); + + Assert.IsNotNull(response); + UtilityMethods.CompareFiles(originalFilePath, downloadedFilePath); + } + + [TestMethod] + [TestCategory("S3")] + public async Task MultipartDownloadInitiatedCompletedEventsTest() + { + var fileName = UtilityMethods.GenerateName("MultipartDownloadEvents"); + var originalFilePath = Path.Combine(BasePath, fileName); + var downloadedFilePath = originalFilePath + ".dn"; + long expectedSize = 20 * MEG_SIZE; + + // Upload large file + UtilityMethods.GenerateFile(originalFilePath, expectedSize); + await Client.PutObjectAsync(new PutObjectRequest + { + BucketName = bucketName, + Key = fileName, + FilePath = originalFilePath + }); + + bool initiatedEventFired = false; + bool completedEventFired = false; + + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityDownloadRequest + { + BucketName = bucketName, + FilePath = downloadedFilePath, + Key = fileName + }; + + request.DownloadInitiatedEvent += (s, e) => + { + Assert.IsFalse(initiatedEventFired, "Initiated event should fire only once"); + initiatedEventFired = true; + Assert.AreEqual(fileName, e.Request.Key); + }; + + request.DownloadCompletedEvent += (s, e) => + { + Assert.IsFalse(completedEventFired, "Completed event should fire only once"); + completedEventFired = true; + Assert.AreEqual(expectedSize, e.TotalBytes); + Assert.AreEqual(expectedSize, e.TransferredBytes); + }; + + var response = await transferUtility.DownloadWithResponseAsync(request); + + Assert.IsTrue(initiatedEventFired, "Initiated event should have fired"); + Assert.IsTrue(completedEventFired, "Completed event should have fired"); + Assert.IsNotNull(response); + } + + [TestMethod] + [TestCategory("S3")] + public async Task 
MultipartDownloadFailedEventTest() + { + var fileName = UtilityMethods.GenerateName("MultipartDownloadFailed"); + var downloadedFilePath = Path.Combine(BasePath, fileName + ".dn"); + + bool failedEventFired = false; + + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityDownloadRequest + { + BucketName = bucketName, + FilePath = downloadedFilePath, + Key = "non-existent-key-" + Guid.NewGuid() // Intentionally non-existent + }; + + request.DownloadFailedEvent += (s, e) => + { + failedEventFired = true; + Assert.IsNotNull(e.FilePath); + }; + + try + { + await transferUtility.DownloadWithResponseAsync(request); + Assert.Fail("Expected an exception to be thrown for non-existent key"); + } + catch (AmazonS3Exception) + { + // Expected exception - the failed event should have been fired + Assert.IsTrue(failedEventFired, "Failed event should have fired"); + } + } + + void Download(string fileName, long size, TransferProgressValidator progressValidator) + { + var key = fileName; + var originalFilePath = Path.Combine(BasePath, fileName); + UtilityMethods.GenerateFile(originalFilePath, size); + + Client.PutObject(new PutObjectRequest + { + BucketName = bucketName, + Key = key, + FilePath = originalFilePath + }); + + var downloadedFilePath = originalFilePath + ".dn"; + + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityDownloadRequest + { + BucketName = bucketName, + FilePath = downloadedFilePath, + Key = key + }; + if (progressValidator != null) + { + request.WriteObjectProgressEvent += progressValidator.OnProgressEvent; + } + transferUtility.Download(request); + + UtilityMethods.CompareFiles(originalFilePath, downloadedFilePath); + } + + [TestMethod] + [TestCategory("S3")] + public void OpenStreamTest() + { + var fileName = UtilityMethods.GenerateName(@"OpenStreamTest\File"); + var key = fileName; + var originalFilePath = Path.Combine(BasePath, fileName); + UtilityMethods.GenerateFile(originalFilePath, 2 * MEG_SIZE); + Client.PutObject(new PutObjectRequest + { + BucketName = bucketName, + Key = key, + FilePath = originalFilePath + }); + + var transferUtility = new TransferUtility(Client); + var stream = transferUtility.OpenStream(bucketName, key); + Assert.IsNotNull(stream); + Assert.IsTrue(stream.CanRead); + stream.Close(); + } + + /// + /// Partial download resumption support can erroneously trigger retry with + /// byte range of 0 to Long.MaxValue if a zero length object is the first object /// to be download to a new folder path - S3 then yields an invalid byte range /// error on the retry. 
/// Test ensures the fix, to test that the folder path exists before trying to @@ -1079,6 +1695,248 @@ public void TestMultipartUploadWithSetContentTypeNotOverwritten() Assert.IsTrue(metadata.Headers.ContentType.Equals(MediaTypeNames.Text.Plain)); } + [TestMethod] + [TestCategory("S3")] + public async Task UploadWithResponseAsyncSmallFileTest() + { + var fileName = UtilityMethods.GenerateName(@"UploadWithResponseTest\SmallFile"); + var path = Path.Combine(BasePath, fileName); + var fileSize = 1 * MEG_SIZE; // Small file for single-part upload + UtilityMethods.GenerateFile(path, fileSize); + + using (var transferUtility = new TransferUtility(Client)) + { + var request = new TransferUtilityUploadRequest + { + BucketName = bucketName, + FilePath = path, + Key = fileName, + ContentType = octetStreamContentType + }; + + var response = await transferUtility.UploadWithResponseAsync(request); + + // Validate response object is not null + Assert.IsNotNull(response, "Response should not be null"); + + // Validate essential response fields that should always be present + Assert.IsNotNull(response.ETag, "ETag should not be null"); + Assert.IsTrue(response.ETag.Length > 0, "ETag should not be empty"); + + // For small files, we expect single-part upload behavior - ETag should be MD5 format (no quotes or dashes) + // ETag format varies, so we just ensure it's a valid non-empty string + Console.WriteLine($"ETag: {response.ETag}"); + Console.WriteLine($"VersionId: {response.VersionId}"); + + // Validate file was actually uploaded by checking metadata + var metadata = await Client.GetObjectMetadataAsync(new GetObjectMetadataRequest + { + BucketName = bucketName, + Key = fileName + }); + Assert.AreEqual(fileSize, metadata.ContentLength, "Uploaded file size should match original"); + Assert.AreEqual(response.ETag, metadata.ETag, "ETag from response should match object metadata"); + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task UploadWithResponseAsyncLargeFileTest() + { + var fileName = UtilityMethods.GenerateName(@"UploadWithResponseTest\LargeFile"); + var path = Path.Combine(BasePath, fileName); + var fileSize = 20 * MEG_SIZE; // Large file for multipart upload + UtilityMethods.GenerateFile(path, fileSize); + + using (var transferUtility = new TransferUtility(Client)) + { + var request = new TransferUtilityUploadRequest + { + BucketName = bucketName, + FilePath = path, + Key = fileName, + ContentType = octetStreamContentType + }; + + var response = await transferUtility.UploadWithResponseAsync(request); + + // Validate response object is not null + Assert.IsNotNull(response, "Response should not be null"); + + // Validate essential response fields that should always be present + Assert.IsNotNull(response.ETag, "ETag should not be null"); + Assert.IsTrue(response.ETag.Length > 0, "ETag should not be empty"); + + // For multipart uploads, ETag format is different (contains dashes) + // We just validate it's a valid string for now + Console.WriteLine($"ETag (multipart): {response.ETag}"); + Console.WriteLine($"VersionId: {response.VersionId}"); + + // Validate file was actually uploaded by checking metadata + var metadata = await Client.GetObjectMetadataAsync(new GetObjectMetadataRequest + { + BucketName = bucketName, + Key = fileName + }); + Assert.AreEqual(fileSize, metadata.ContentLength, "Uploaded file size should match original"); + Assert.AreEqual(response.ETag, metadata.ETag, "ETag from response should match object metadata"); + } + } + + [TestMethod] + [TestCategory("S3")] + public 
async Task UploadWithResponseAsyncStreamTest() + { + var fileName = UtilityMethods.GenerateName(@"UploadWithResponseTest\StreamFile"); + var path = Path.Combine(BasePath, fileName); + var fileSize = 5 * MEG_SIZE; + UtilityMethods.GenerateFile(path, fileSize); + + using (var transferUtility = new TransferUtility(Client)) + using (var fileStream = File.OpenRead(path)) + { + var request = new TransferUtilityUploadRequest + { + BucketName = bucketName, + InputStream = fileStream, + Key = fileName, + ContentType = octetStreamContentType + }; + + var response = await transferUtility.UploadWithResponseAsync(request); + + // Validate response object is not null + Assert.IsNotNull(response, "Response should not be null"); + + // Validate essential response fields that should always be present + Assert.IsNotNull(response.ETag, "ETag should not be null"); + Assert.IsTrue(response.ETag.Length > 0, "ETag should not be empty"); + + Console.WriteLine($"ETag (stream): {response.ETag}"); + Console.WriteLine($"VersionId: {response.VersionId}"); + + // Validate file was actually streamed and uploaded correctly + var metadata = await Client.GetObjectMetadataAsync(new GetObjectMetadataRequest + { + BucketName = bucketName, + Key = fileName + }); + Assert.AreEqual(fileSize, metadata.ContentLength, "Uploaded stream size should match original"); + Assert.AreEqual(response.ETag, metadata.ETag, "ETag from response should match object metadata"); + + // Validate content by downloading and comparing + var downloadPath = path + ".download"; + await transferUtility.DownloadAsync(new TransferUtilityDownloadRequest + { + BucketName = bucketName, + Key = fileName, + FilePath = downloadPath + }); + UtilityMethods.CompareFiles(path, downloadPath); + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task UploadWithResponseAsyncWithChecksumTest() + { + var fileName = UtilityMethods.GenerateName(@"UploadWithResponseTest\ChecksumFile"); + var path = Path.Combine(BasePath, fileName); + var fileSize = 2 * MEG_SIZE; + UtilityMethods.GenerateFile(path, fileSize); + + // Calculate checksum for the file + var fileBytes = File.ReadAllBytes(path); + var precalculatedChecksum = CryptoUtilFactory.CryptoInstance.ComputeCRC32Hash(fileBytes); + + using (var transferUtility = new TransferUtility(Client)) + { + var request = new TransferUtilityUploadRequest + { + BucketName = bucketName, + FilePath = path, + Key = fileName, + ContentType = octetStreamContentType, + ChecksumCRC32 = precalculatedChecksum + }; + + var response = await transferUtility.UploadWithResponseAsync(request); + + // Validate response object is not null + Assert.IsNotNull(response, "Response should not be null"); + + // Validate essential response fields + Assert.IsNotNull(response.ETag, "ETag should not be null"); + Assert.IsTrue(response.ETag.Length > 0, "ETag should not be empty"); + + // Validate checksum fields if they should be present + // Note: Checksum fields in response may not always be set depending on S3 behavior + Console.WriteLine($"ETag: {response.ETag}"); + Console.WriteLine($"ChecksumCRC32: {response.ChecksumCRC32}"); + Console.WriteLine($"ChecksumType: {response.ChecksumType}"); + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task UploadWithResponseAsyncCompareWithLegacyUploadTest() + { + var fileName = UtilityMethods.GenerateName(@"UploadWithResponseTest\CompareFile"); + var path = Path.Combine(BasePath, fileName); + var fileSize = 8 * MEG_SIZE; + UtilityMethods.GenerateFile(path, fileSize); + + using (var transferUtility = 
new TransferUtility(Client)) + { + // Test the new UploadWithResponseAsync method + var responseRequest = new TransferUtilityUploadRequest + { + BucketName = bucketName, + FilePath = path, + Key = fileName + "-with-response", + ContentType = octetStreamContentType + }; + + var response = await transferUtility.UploadWithResponseAsync(responseRequest); + + // Test the legacy Upload method for comparison + var legacyRequest = new TransferUtilityUploadRequest + { + BucketName = bucketName, + FilePath = path, + Key = fileName + "-legacy", + ContentType = octetStreamContentType + }; + + await transferUtility.UploadAsync(legacyRequest); + + // Validate that both uploads resulted in the same file being uploaded + var responseMetadata = await Client.GetObjectMetadataAsync(new GetObjectMetadataRequest + { + BucketName = bucketName, + Key = fileName + "-with-response" + }); + + var legacyMetadata = await Client.GetObjectMetadataAsync(new GetObjectMetadataRequest + { + BucketName = bucketName, + Key = fileName + "-legacy" + }); + + // Both should have the same file size and content type + Assert.AreEqual(responseMetadata.ContentLength, legacyMetadata.ContentLength, "File sizes should match"); + Assert.AreEqual(responseMetadata.Headers.ContentType, legacyMetadata.Headers.ContentType, "Content types should match"); + + // Validate the response contains the expected ETag + Assert.IsNotNull(response.ETag, "Response ETag should not be null"); + Assert.AreEqual(response.ETag, responseMetadata.ETag, "Response ETag should match metadata ETag"); + + Console.WriteLine($"UploadWithResponseAsync ETag: {response.ETag}"); + Console.WriteLine($"Legacy upload ETag: {legacyMetadata.ETag}"); + Console.WriteLine($"File size: {fileSize}, Response metadata size: {responseMetadata.ContentLength}"); + } + } + #if ASYNC_AWAIT [TestMethod] @@ -1387,7 +2245,7 @@ public void AssertOnCompletion() if (this.ProgressEventException != null) throw this.ProgressEventException; - // Add some time for the background thread to finish before checking the complete + // AWSSDKUtils.InvokeInBackground raises the event on a background thread, so it may not have fired by the time we first check; sleep and check again.
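// ---- Editorial aside (illustrative sketch, not part of this patch) ----
// The retry loop below is a poll-with-linear-backoff wait for a flag that is set
// on a background thread. The same idea as a reusable helper (name invented):
static bool WaitUntil(Func<bool> condition, int maxAttempts = 5)
{
    // Sleep 1s, then 2s, then 3s, ... between checks, mirroring the loop below.
    for (int attempt = 1; attempt < maxAttempts && !condition(); attempt++)
        System.Threading.Thread.Sleep(1000 * attempt);
    return condition();
}
// ---- End editorial aside ----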
for (int retries = 1; retries < 5 && !this.IsProgressEventComplete; retries++) { Thread.Sleep(1000 * retries); @@ -1480,6 +2338,212 @@ public void OnProgressEvent(object sender, T progress) } } } + + class TransferLifecycleEventValidator<T> + { + public Action<T> Validate { get; set; } + public bool EventFired { get; private set; } + public Exception EventException { get; private set; } + + public void OnEventFired(object sender, T eventArgs) + { + try + { + Console.WriteLine("Lifecycle Event Fired: {0}", typeof(T).Name); + Validate?.Invoke(eventArgs); + EventFired = true; // Only set if validation passes + } + catch (Exception ex) + { + EventException = ex; + EventFired = false; // Ensure we don't mark as fired on failure + Console.WriteLine("Exception caught in lifecycle event: {0}", ex.Message); + // Don't re-throw, let AssertEventFired() handle it + } + } + + public void AssertEventFired() + { + if (EventException != null) + throw EventException; + + // Since lifecycle events are fired synchronously, we can check immediately without retries + Assert.IsTrue(EventFired, $"{typeof(T).Name} event was not fired"); + } + } + + void UploadWithLifecycleEvents(string fileName, long size, + TransferLifecycleEventValidator<UploadInitiatedEventArgs> initiatedValidator, + TransferLifecycleEventValidator<UploadCompletedEventArgs> completedValidator, + TransferLifecycleEventValidator<UploadFailedEventArgs> failedValidator) + { + UploadWithLifecycleEventsAndBucket(fileName, size, bucketName, initiatedValidator, completedValidator, failedValidator); + } + + void UploadWithLifecycleEventsAndBucket(string fileName, long size, string targetBucketName, + TransferLifecycleEventValidator<UploadInitiatedEventArgs> initiatedValidator, + TransferLifecycleEventValidator<UploadCompletedEventArgs> completedValidator, + TransferLifecycleEventValidator<UploadFailedEventArgs> failedValidator) + { + var key = fileName; + var path = Path.Combine(BasePath, fileName); + UtilityMethods.GenerateFile(path, size); + + var config = new TransferUtilityConfig(); + var transferUtility = new TransferUtility(Client, config); + var request = new TransferUtilityUploadRequest + { + BucketName = targetBucketName, + FilePath = path, + Key = key, + ContentType = octetStreamContentType + }; + + if (initiatedValidator != null) + { + request.UploadInitiatedEvent += initiatedValidator.OnEventFired; + } + + if (completedValidator != null) + { + request.UploadCompletedEvent += completedValidator.OnEventFired; + } + + if (failedValidator != null) + { + request.UploadFailedEvent += failedValidator.OnEventFired; + } + + transferUtility.Upload(request); + } + + void UploadUnseekableStreamWithLifecycleEvents(long size, + TransferLifecycleEventValidator<UploadInitiatedEventArgs> initiatedValidator, + TransferLifecycleEventValidator<UploadCompletedEventArgs> completedValidator, + TransferLifecycleEventValidator<UploadFailedEventArgs> failedValidator) + { + UploadUnseekableStreamWithLifecycleEventsAndBucket(size, bucketName, initiatedValidator, completedValidator, failedValidator); + } + + void UploadUnseekableStreamWithLifecycleEventsAndBucket(long size, string targetBucketName, + TransferLifecycleEventValidator<UploadInitiatedEventArgs> initiatedValidator, + TransferLifecycleEventValidator<UploadCompletedEventArgs> completedValidator, + TransferLifecycleEventValidator<UploadFailedEventArgs> failedValidator) + { + var fileName = UtilityMethods.GenerateName(@"UnseekableStreamUpload\File"); + var key = fileName; + var path = Path.Combine(BasePath, fileName); + UtilityMethods.GenerateFile(path, size); + + // Convert file to unseekable stream + var stream = GenerateUnseekableStreamFromFile(path); + + var config = new TransferUtilityConfig(); + var transferUtility = new TransferUtility(Client, config); + var request = new TransferUtilityUploadRequest + {
+ void UploadUnseekableStreamWithLifecycleEvents(long size, + TransferLifecycleEventValidator<UploadInitiatedEventArgs> initiatedValidator, + TransferLifecycleEventValidator<UploadCompletedEventArgs> completedValidator, + TransferLifecycleEventValidator<UploadFailedEventArgs> failedValidator) + { + UploadUnseekableStreamWithLifecycleEventsAndBucket(size, bucketName, initiatedValidator, completedValidator, failedValidator); + } + + void UploadUnseekableStreamWithLifecycleEventsAndBucket(long size, string targetBucketName, + TransferLifecycleEventValidator<UploadInitiatedEventArgs> initiatedValidator, + TransferLifecycleEventValidator<UploadCompletedEventArgs> completedValidator, + TransferLifecycleEventValidator<UploadFailedEventArgs> failedValidator) + { + var fileName = UtilityMethods.GenerateName(@"UnseekableStreamUpload\File"); + var key = fileName; + var path = Path.Combine(BasePath, fileName); + UtilityMethods.GenerateFile(path, size); + + // Convert file to unseekable stream + var stream = GenerateUnseekableStreamFromFile(path); + + var config = new TransferUtilityConfig(); + var transferUtility = new TransferUtility(Client, config); + var request = new TransferUtilityUploadRequest + { + BucketName = targetBucketName, + InputStream = stream, + Key = key, + ContentType = octetStreamContentType + }; + + if (initiatedValidator != null) + { + request.UploadInitiatedEvent += initiatedValidator.OnEventFired; + } + + if (completedValidator != null) + { + request.UploadCompletedEvent += completedValidator.OnEventFired; + } + + if (failedValidator != null) + { + request.UploadFailedEvent += failedValidator.OnEventFired; + } + + transferUtility.Upload(request); + } + + void DownloadWithLifecycleEvents(string fileName, long size, + TransferLifecycleEventValidator<DownloadInitiatedEventArgs> initiatedValidator, + TransferLifecycleEventValidator<DownloadCompletedEventArgs> completedValidator, + TransferLifecycleEventValidator<DownloadFailedEventArgs> failedValidator) + { + // First upload the file so we have something to download + var key = fileName; + var originalFilePath = Path.Combine(BasePath, fileName); + UtilityMethods.GenerateFile(originalFilePath, size); + + Client.PutObject(new PutObjectRequest + { + BucketName = bucketName, + Key = key, + FilePath = originalFilePath + }); + + var downloadedFilePath = originalFilePath + ".download"; + + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityDownloadRequest + { + BucketName = bucketName, + FilePath = downloadedFilePath, + Key = key + }; + + if (initiatedValidator != null) + { + request.DownloadInitiatedEvent += initiatedValidator.OnEventFired; + } + + if (completedValidator != null) + { + request.DownloadCompletedEvent += completedValidator.OnEventFired; + } + + if (failedValidator != null) + { + request.DownloadFailedEvent += failedValidator.OnEventFired; + } + + transferUtility.Download(request); + } + + void DownloadWithLifecycleEventsAndKey(string fileName, string keyToDownload, + TransferLifecycleEventValidator<DownloadInitiatedEventArgs> initiatedValidator, + TransferLifecycleEventValidator<DownloadCompletedEventArgs> completedValidator, + TransferLifecycleEventValidator<DownloadFailedEventArgs> failedValidator) + { + var downloadedFilePath = Path.Combine(BasePath, fileName + ".download"); + + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityDownloadRequest + { + BucketName = bucketName, + FilePath = downloadedFilePath, + Key = keyToDownload + }; + + if (initiatedValidator != null) + { + request.DownloadInitiatedEvent += initiatedValidator.OnEventFired; + } + + if (completedValidator != null) + { + request.DownloadCompletedEvent += completedValidator.OnEventFired; + } + + if (failedValidator != null) + { + request.DownloadFailedEvent += failedValidator.OnEventFired; + } + + transferUtility.Download(request); + } + private class UnseekableStream : MemoryStream { private readonly bool _setZeroLengthStream; @@ -1507,6 +2571,109 @@ public override long Length } } } + + [TestMethod] + [TestCategory("S3")] + public async Task UploadDirectoryFailurePolicy_ContinueOnFailure_AllFailures() + { + var nonExistentBucket = "non-existent-" + Guid.NewGuid().ToString("N"); + var directory = CreateTestDirectory(1 * KILO_SIZE, numberOfTestFiles: 3); + + try + { + using (var transferUtility = new TransferUtility(Client)) + { + var request = new TransferUtilityUploadDirectoryRequest + { + BucketName = nonExistentBucket, + Directory = directory.FullName, + SearchPattern = "*", + SearchOption = SearchOption.AllDirectories, + FailurePolicy = FailurePolicy.ContinueOnFailure, + UploadFilesConcurrently = true + }; + + // ContinueOnFailure should not throw even if all uploads fail + var response = await transferUtility.UploadDirectoryWithResponseAsync(request); + + Assert.IsNotNull(response); + Assert.AreEqual(0, response.ObjectsUploaded);
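+ // ContinueOnFailure still attempts every file rather than aborting at the first error, so all three files are reported as failed.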
Assert.AreEqual(3, response.ObjectsFailed); + Assert.AreEqual(DirectoryResult.Failure, response.Result); + } + } + finally + { + try { Directory.Delete(directory.FullName, true); } catch { } + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task UploadDirectoryFailurePolicy_ContinueOnFailure_AllSuccess() + { + var directory = CreateTestDirectory(1 * KILO_SIZE, numberOfTestFiles: 3); + try + { + using (var transferUtility = new TransferUtility(Client)) + { + var request = new TransferUtilityUploadDirectoryRequest + { + BucketName = bucketName, + Directory = directory.FullName, + KeyPrefix = directory.Name, + SearchPattern = "*", + SearchOption = SearchOption.AllDirectories, + FailurePolicy = FailurePolicy.ContinueOnFailure, + UploadFilesConcurrently = true + }; + + var response = await transferUtility.UploadDirectoryWithResponseAsync(request); + + Assert.IsNotNull(response); + Assert.AreEqual(3, response.ObjectsUploaded); + Assert.AreEqual(0, response.ObjectsFailed); + Assert.AreEqual(DirectoryResult.Success, response.Result); + + // Validate uploaded contents + ValidateDirectoryContents(Client, bucketName, directory.Name, directory, plainTextContentType); + } + } + finally + { + try { Directory.Delete(directory.FullName, true); } catch { } + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task UploadDirectoryFailurePolicy_AbortOnFailure_Throws() + { + var nonExistentBucket = "non-existent-" + Guid.NewGuid().ToString("N"); + var directory = CreateTestDirectory(1 * KILO_SIZE, numberOfTestFiles: 2); + + try + { + using (var transferUtility = new TransferUtility(Client)) + { + var request = new TransferUtilityUploadDirectoryRequest + { + BucketName = nonExistentBucket, + Directory = directory.FullName, + SearchPattern = "*", + SearchOption = SearchOption.AllDirectories, + FailurePolicy = FailurePolicy.AbortOnFailure, + UploadFilesConcurrently = true + }; + + await Assert.ThrowsExceptionAsync<AmazonS3Exception>(() => transferUtility.UploadDirectoryWithResponseAsync(request)); + } + } + finally + { + try { Directory.Delete(directory.FullName, true); } catch { } + } + } } } diff --git a/sdk/test/Services/S3/IntegrationTests/TransferUtilityUploadDirectoryLifecycleTests.cs b/sdk/test/Services/S3/IntegrationTests/TransferUtilityUploadDirectoryLifecycleTests.cs new file mode 100644 index 000000000000..4acfe928ec7a --- /dev/null +++ b/sdk/test/Services/S3/IntegrationTests/TransferUtilityUploadDirectoryLifecycleTests.cs @@ -0,0 +1,308 @@ +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Threading; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Amazon.S3; +using Amazon.S3.Model; +using Amazon.S3.Transfer; +using Amazon.S3.Util; +using AWSSDK_DotNet.IntegrationTests.Utils; + +namespace AWSSDK_DotNet.IntegrationTests.Tests.S3 +{ + /// <summary> + /// Integration tests for TransferUtility upload directory lifecycle events. + /// Tests the initiated, completed, and failed events for directory uploads.
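+ /// Lifecycle events are raised synchronously on the calling thread, so the tests can assert on the validators immediately after the call returns.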
+ /// </summary> + [TestClass] + public class TransferUtilityUploadDirectoryLifecycleTests : TestBase + { + public static readonly long MEG_SIZE = (int)Math.Pow(2, 20); + public static readonly long KILO_SIZE = (int)Math.Pow(2, 10); + public static readonly string BasePath = Path.Combine(Path.GetTempPath(), "transferutility", "uploaddirectorylifecycle"); + + private static string bucketName; + private static string plainTextContentType = "text/plain"; + + [ClassInitialize()] + public static void ClassInitialize(TestContext a) + { + bucketName = S3TestUtils.CreateBucketWithWait(Client); + } + + [ClassCleanup] + public static void ClassCleanup() + { + AmazonS3Util.DeleteS3BucketWithObjects(Client, bucketName); + BaseClean(); + if (Directory.Exists(BasePath)) + { + Directory.Delete(BasePath, true); + } + } + + [TestMethod] + [TestCategory("S3")] + public void UploadDirectoryInitiatedEventTest() + { + var eventValidator = new TransferLifecycleEventValidator<UploadDirectoryInitiatedEventArgs> + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.IsNotNull(args.Request.BucketName); + Assert.IsNotNull(args.Request.Directory); + + // Verify that total files and bytes are provided in initiated event + Assert.IsTrue(args.TotalFiles > 0, "TotalFiles should be greater than 0"); + Assert.IsTrue(args.TotalBytes > 0, "TotalBytes should be greater than 0"); + + } + }; + UploadDirectoryWithLifecycleEvents(10 * MEG_SIZE, eventValidator, null, null); + eventValidator.AssertEventFired(); + } + + [TestMethod] + [TestCategory("S3")] + public void UploadDirectoryCompletedEventTest() + { + var eventValidator = new TransferLifecycleEventValidator<UploadDirectoryCompletedEventArgs> + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.IsNotNull(args.Response); + + // Verify progress information is available in completed event + Assert.IsTrue(args.TotalFiles > 0, "TotalFiles should be greater than 0"); + Assert.AreEqual(args.TransferredFiles, args.TotalFiles, "All files should be transferred"); + Assert.IsTrue(args.TotalBytes > 0, "TotalBytes should be greater than 0"); + Assert.AreEqual(args.TransferredBytes, args.TotalBytes, "All bytes should be transferred"); + + // Verify response contains expected data + Assert.AreEqual(args.TransferredFiles, args.Response.ObjectsUploaded, "Response ObjectsUploaded should match TransferredFiles"); + Assert.AreEqual(0, args.Response.ObjectsFailed, "No objects should have failed"); + Assert.AreEqual(DirectoryResult.Success, args.Response.Result, "Result should be Success"); + + } + }; + UploadDirectoryWithLifecycleEvents(12 * MEG_SIZE, null, eventValidator, null); + eventValidator.AssertEventFired(); + } + + [TestMethod] + [TestCategory("S3")] + public void UploadDirectoryFailedEventTest() + { + var eventValidator = new TransferLifecycleEventValidator<UploadDirectoryFailedEventArgs> + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + } + }; + + // Use an invalid bucket name to force a real exception + // Bucket names with uppercase letters are invalid and will cause an exception + var invalidBucketName = "INVALID-BUCKET-NAME-" + Guid.NewGuid().ToString(); + + var directory = CreateTestDirectory(5 * MEG_SIZE); + var directoryPath = directory.FullName; + + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityUploadDirectoryRequest + { + BucketName = invalidBucketName, // This will cause an exception due to invalid bucket name + Directory = directoryPath, + KeyPrefix = "test-prefix", + SearchPattern = "*", + SearchOption = SearchOption.AllDirectories + }; + + request.UploadDirectoryFailedEvent +=
eventValidator.OnEventFired; + + try + { + transferUtility.UploadDirectory(request); + Assert.Fail("Expected an exception to be thrown for invalid bucket name"); + } + catch (Exception ex) + { + // Expected exception - the failed event should have been fired + Console.WriteLine($"Expected exception caught: {ex.GetType().Name} - {ex.Message}"); + } + + eventValidator.AssertEventFired(); + } + + [TestMethod] + [TestCategory("S3")] + public void UploadDirectoryCompleteLifecycleTest() + { + var initiatedValidator = new TransferLifecycleEventValidator<UploadDirectoryInitiatedEventArgs> + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.AreEqual(bucketName, args.Request.BucketName); + Assert.IsNotNull(args.Request.Directory); + Assert.IsTrue(args.TotalFiles > 0); + Assert.IsTrue(args.TotalBytes > 0); + } + }; + + var completedValidator = new TransferLifecycleEventValidator<UploadDirectoryCompletedEventArgs> + { + Validate = (args) => + { + Assert.IsNotNull(args.Request); + Assert.IsNotNull(args.Response); + Assert.AreEqual(args.TransferredFiles, args.TotalFiles); + Assert.AreEqual(args.TransferredBytes, args.TotalBytes); + Assert.IsTrue(args.TotalFiles > 0, "Should have uploaded at least one file"); + Assert.AreEqual(DirectoryResult.Success, args.Response.Result); + } + }; + + UploadDirectoryWithLifecycleEvents(15 * MEG_SIZE, initiatedValidator, completedValidator, null); + + initiatedValidator.AssertEventFired(); + completedValidator.AssertEventFired(); + } + + #region Helper Methods + + void UploadDirectoryWithLifecycleEvents(long fileSize, + TransferLifecycleEventValidator<UploadDirectoryInitiatedEventArgs> initiatedValidator, + TransferLifecycleEventValidator<UploadDirectoryCompletedEventArgs> completedValidator, + TransferLifecycleEventValidator<UploadDirectoryFailedEventArgs> failedValidator) + { + var directory = CreateTestDirectory(fileSize); + var keyPrefix = directory.Name; + var directoryPath = directory.FullName; + + UploadDirectoryWithLifecycleEventsAndDirectory(directoryPath, keyPrefix, initiatedValidator, completedValidator, failedValidator); + } + + void UploadDirectoryWithLifecycleEventsAndDirectory(string directoryPath, string keyPrefix, + TransferLifecycleEventValidator<UploadDirectoryInitiatedEventArgs> initiatedValidator, + TransferLifecycleEventValidator<UploadDirectoryCompletedEventArgs> completedValidator, + TransferLifecycleEventValidator<UploadDirectoryFailedEventArgs> failedValidator) + { + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityUploadDirectoryRequest + { + BucketName = bucketName, + Directory = directoryPath, + KeyPrefix = keyPrefix, + ContentType = plainTextContentType, + SearchPattern = "*", + SearchOption = SearchOption.AllDirectories + }; + + if (initiatedValidator != null) + { + request.UploadDirectoryInitiatedEvent += initiatedValidator.OnEventFired; + } + + if (completedValidator != null) + { + request.UploadDirectoryCompletedEvent += completedValidator.OnEventFired; + } + + if (failedValidator != null) + { + request.UploadDirectoryFailedEvent += failedValidator.OnEventFired; + } + + transferUtility.UploadDirectory(request); + + // Validate uploaded directory contents if it was successful + var directory = new DirectoryInfo(directoryPath); + ValidateDirectoryContentsInS3(Client, bucketName, keyPrefix, directory); + } + + public static DirectoryInfo CreateTestDirectory(long fileSize = 0, int numberOfTestFiles = 3) + { + if (fileSize == 0) + fileSize = 1 * MEG_SIZE; + + var directoryPath = GenerateDirectoryPath(); + for (int i = 0; i < numberOfTestFiles; i++) + { + var filePath = Path.Combine(Path.Combine(directoryPath, i.ToString()), "file.txt"); + UtilityMethods.GenerateFile(filePath, fileSize); + } + + return new DirectoryInfo(directoryPath); + } + + public
static string GenerateDirectoryPath(string baseName = "UploadDirectoryLifecycleTest") + { + var directoryName = UtilityMethods.GenerateName(baseName); + var directoryPath = Path.Combine(BasePath, directoryName); + return directoryPath; + } + + public static void ValidateDirectoryContentsInS3(IAmazonS3 s3client, string bucketName, string keyPrefix, DirectoryInfo sourceDirectory) + { + var directoryPath = sourceDirectory.FullName; + var files = sourceDirectory.GetFiles("*", SearchOption.AllDirectories); + foreach (var file in files) + { + var filePath = file.FullName; + var relativePath = filePath.Substring(directoryPath.Length + 1); + var key = (!string.IsNullOrEmpty(keyPrefix) ? keyPrefix + "/" : string.Empty) + relativePath.Replace("\\", "/"); + + // Verify the object exists in S3 + var metadata = s3client.GetObjectMetadata(new GetObjectMetadataRequest + { + BucketName = bucketName, + Key = key + }); + Assert.IsNotNull(metadata, $"Object {key} should exist in S3"); + Console.WriteLine($"Validated object exists in S3: {key}"); + } + } + + #endregion + + #region Shared Helper Classes + + class TransferLifecycleEventValidator<T> + { + public Action<T> Validate { get; set; } + public bool EventFired { get; private set; } + public Exception EventException { get; private set; } + + public void OnEventFired(object sender, T eventArgs) + { + try + { + Console.WriteLine("Lifecycle Event Fired: {0}", typeof(T).Name); + Validate?.Invoke(eventArgs); + EventFired = true; // Only set if validation passes + } + catch (Exception ex) + { + EventException = ex; + EventFired = false; // Ensure we don't mark as fired on failure + Console.WriteLine("Exception caught in lifecycle event: {0}", ex.Message); + // Don't re-throw, let AssertEventFired() handle it + } + } + + public void AssertEventFired() + { + if (EventException != null) + throw EventException; + Assert.IsTrue(EventFired, $"{typeof(T).Name} event was not fired"); + } + } + + #endregion + } +} diff --git a/sdk/test/Services/S3/IntegrationTests/TransferUtilityUploadDirectoryWithResponseTests.cs b/sdk/test/Services/S3/IntegrationTests/TransferUtilityUploadDirectoryWithResponseTests.cs new file mode 100644 index 000000000000..1f7c9f9d6c05 --- /dev/null +++ b/sdk/test/Services/S3/IntegrationTests/TransferUtilityUploadDirectoryWithResponseTests.cs @@ -0,0 +1,671 @@ +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Threading.Tasks; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Amazon.S3; +using Amazon.S3.Model; +using Amazon.S3.Transfer; +using Amazon.S3.Util; +using AWSSDK_DotNet.IntegrationTests.Utils; + +namespace AWSSDK_DotNet.IntegrationTests.Tests.S3 +{ + /// <summary> + /// Integration tests for TransferUtility.UploadDirectoryWithResponseAsync functionality. + /// These tests verify end-to-end functionality with actual S3 operations and directory I/O.
+ /// + /// These integration tests focus on: + /// - Basic directory uploads with response object + /// - Progress tracking with response + /// - Multipart uploads in directory context + /// - Concurrent vs sequential uploads + /// - Nested directory structures + /// - Response validation + /// </summary> + [TestClass] + public class TransferUtilityUploadDirectoryWithResponseTests : TestBase + { + private static readonly long MB = 1024 * 1024; + private static readonly long KB = 1024; + private static string bucketName; + private static string tempDirectory; + + [ClassInitialize()] + public static void ClassInitialize(TestContext testContext) + { + bucketName = S3TestUtils.CreateBucketWithWait(Client); + tempDirectory = Path.Combine(Path.GetTempPath(), "S3UploadDirectoryTests-" + Guid.NewGuid().ToString()); + Directory.CreateDirectory(tempDirectory); + } + + [ClassCleanup] + public static void ClassCleanup() + { + AmazonS3Util.DeleteS3BucketWithObjects(Client, bucketName); + + // Clean up temp directory + if (Directory.Exists(tempDirectory)) + { + try + { + Directory.Delete(tempDirectory, recursive: true); + } + catch + { + // Best effort cleanup + } + } + + BaseClean(); + } + + [TestCleanup] + public void TestCleanup() + { + // Clean up any test directories after each test + if (Directory.Exists(tempDirectory)) + { + foreach (var subDir in Directory.GetDirectories(tempDirectory)) + { + try + { + Directory.Delete(subDir, recursive: true); + } + catch + { + // Best effort cleanup + } + } + } + } + + #region Basic Upload Tests + + [TestMethod] + [TestCategory("S3")] + [TestCategory("UploadDirectory")] + public async Task UploadDirectoryWithResponse_BasicUpload_ReturnsCorrectResponse() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("basic-upload"); + var uploadPath = Path.Combine(tempDirectory, keyPrefix + "-upload"); + var fileCount = 5; + + CreateLocalTestDirectory(uploadPath, 2 * MB, fileCount); + + // Act + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityUploadDirectoryRequest + { + BucketName = bucketName, + Directory = uploadPath, + KeyPrefix = keyPrefix, + SearchPattern = "*", + SearchOption = SearchOption.AllDirectories + }; + + var response = await transferUtility.UploadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsNotNull(response, "Response should not be null"); + Assert.AreEqual(fileCount, response.ObjectsUploaded, "ObjectsUploaded should match file count"); + Assert.AreEqual(0, response.ObjectsFailed, "ObjectsFailed should be 0"); + Assert.AreEqual(DirectoryResult.Success, response.Result, "Result should be Success"); + + // Verify all files were uploaded to S3 + await VerifyObjectsInS3(keyPrefix, fileCount); + } + + [TestMethod] + [TestCategory("S3")] + [TestCategory("UploadDirectory")] + public async Task UploadDirectoryWithResponse_EmptyDirectory_ReturnsZeroObjectsUploaded() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("empty-directory"); + var uploadPath = Path.Combine(tempDirectory, keyPrefix + "-upload"); + Directory.CreateDirectory(uploadPath); + + // Act - Upload empty directory + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityUploadDirectoryRequest + { + BucketName = bucketName, + Directory = uploadPath, + KeyPrefix = keyPrefix + }; + + var response = await transferUtility.UploadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsNotNull(response, "Response should not be null"); + Assert.AreEqual(0, response.ObjectsUploaded, "ObjectsUploaded should be 0 for empty directory"); + Assert.AreEqual(0, response.ObjectsFailed, "ObjectsFailed should be 0"); + Assert.AreEqual(DirectoryResult.Success, response.Result, "Result should be Success"); + } + + #endregion
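+ + // For context, the consumer-facing pattern these tests exercise, as a minimal sketch (names are the ones + // used in this PR; "request" is built exactly as in the tests above): + // + // var response = await transferUtility.UploadDirectoryWithResponseAsync(request); + // if (response.Result != DirectoryResult.Success) + // Console.WriteLine($"{response.ObjectsFailed} of {response.ObjectsFailed + response.ObjectsUploaded} files failed");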
+ + #region Progress Tracking Tests + + [TestMethod] + [TestCategory("S3")] + [TestCategory("UploadDirectory")] + public async Task UploadDirectoryWithResponse_WithProgressTracking_FiresProgressEvents() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("progress-tracking"); + var uploadPath = Path.Combine(tempDirectory, keyPrefix + "-upload"); + var fileCount = 3; + + CreateLocalTestDirectory(uploadPath, 5 * MB, fileCount); + + var progressEvents = new List<UploadDirectoryProgressArgs>(); + var progressLock = new object(); + + // Act + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityUploadDirectoryRequest + { + BucketName = bucketName, + Directory = uploadPath, + KeyPrefix = keyPrefix, + SearchPattern = "*", + SearchOption = SearchOption.AllDirectories + }; + + request.UploadDirectoryProgressEvent += (sender, args) => + { + lock (progressLock) + { + progressEvents.Add(args); + } + }; + + var response = await transferUtility.UploadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsNotNull(response, "Response should not be null"); + Assert.AreEqual(fileCount, response.ObjectsUploaded); + Assert.IsTrue(progressEvents.Count > 0, "Progress events should have fired"); + + // Verify final progress event + var finalEvent = progressEvents.Last(); + Assert.AreEqual(fileCount, finalEvent.NumberOfFilesUploaded); + Assert.AreEqual(fileCount, finalEvent.TotalNumberOfFiles); + Assert.AreEqual(finalEvent.TransferredBytes, finalEvent.TotalBytes); + } + + [TestMethod] + [TestCategory("S3")] + [TestCategory("UploadDirectory")] + public async Task UploadDirectoryWithResponse_WithLifecycleEvents_FiresInitiatedAndCompleted() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("lifecycle-events"); + var uploadPath = Path.Combine(tempDirectory, keyPrefix + "-upload"); + var fileCount = 3; + + CreateLocalTestDirectory(uploadPath, 2 * MB, fileCount); + + bool initiatedFired = false; + bool completedFired = false; + UploadDirectoryInitiatedEventArgs initiatedArgs = null; + UploadDirectoryCompletedEventArgs completedArgs = null; + + // Act + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityUploadDirectoryRequest + { + BucketName = bucketName, + Directory = uploadPath, + KeyPrefix = keyPrefix, + SearchPattern = "*", + SearchOption = SearchOption.AllDirectories + }; + + request.UploadDirectoryInitiatedEvent += (sender, args) => + { + initiatedFired = true; + initiatedArgs = args; + }; + + request.UploadDirectoryCompletedEvent += (sender, args) => + { + completedFired = true; + completedArgs = args; + }; + + var response = await transferUtility.UploadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsTrue(initiatedFired, "Initiated event should have fired"); + Assert.IsTrue(completedFired, "Completed event should have fired"); + + Assert.IsNotNull(initiatedArgs); + Assert.AreEqual(fileCount, initiatedArgs.TotalFiles); + Assert.IsTrue(initiatedArgs.TotalBytes > 0); + + Assert.IsNotNull(completedArgs); + Assert.AreEqual(fileCount, completedArgs.TransferredFiles); + Assert.AreEqual(fileCount, completedArgs.TotalFiles); + Assert.AreEqual(completedArgs.Response, response); + } + + [TestMethod] + [TestCategory("S3")] + [TestCategory("UploadDirectory")] + public async Task
UploadDirectoryWithResponse_SequentialMode_IncludesCurrentFileDetails() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("sequential-progress"); + var uploadPath = Path.Combine(tempDirectory, keyPrefix + "-upload"); + + CreateLocalTestDirectory(uploadPath, 3 * MB, 3); + + var progressEvents = new List<UploadDirectoryProgressArgs>(); + + // Act + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityUploadDirectoryRequest + { + BucketName = bucketName, + Directory = uploadPath, + KeyPrefix = keyPrefix, + SearchPattern = "*", + SearchOption = SearchOption.AllDirectories, + UploadFilesConcurrently = false // Sequential mode + }; + + request.UploadDirectoryProgressEvent += (sender, args) => + { + progressEvents.Add(args); + }; + + var response = await transferUtility.UploadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual(3, response.ObjectsUploaded); + + // In sequential mode, should have CurrentFile populated + var eventsWithFile = progressEvents.Where(e => e.CurrentFile != null).ToList(); + Assert.IsTrue(eventsWithFile.Count > 0, "Should have events with CurrentFile populated"); + + foreach (var evt in eventsWithFile) + { + Assert.IsNotNull(evt.CurrentFile); + Assert.IsTrue(evt.TotalNumberOfBytesForCurrentFile > 0); + } + } + + [TestMethod] + [TestCategory("S3")] + [TestCategory("UploadDirectory")] + public async Task UploadDirectoryWithResponse_ConcurrentMode_OmitsCurrentFileDetails() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("concurrent-progress"); + var uploadPath = Path.Combine(tempDirectory, keyPrefix + "-upload"); + + CreateLocalTestDirectory(uploadPath, 3 * MB, 4); + + var progressEvents = new List<UploadDirectoryProgressArgs>(); + var progressLock = new object(); + + // Act + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityUploadDirectoryRequest + { + BucketName = bucketName, + Directory = uploadPath, + KeyPrefix = keyPrefix, + SearchPattern = "*", + SearchOption = SearchOption.AllDirectories, + UploadFilesConcurrently = true // Concurrent mode + }; + + request.UploadDirectoryProgressEvent += (sender, args) => + { + lock (progressLock) + { + progressEvents.Add(args); + } + }; + + var response = await transferUtility.UploadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual(4, response.ObjectsUploaded); + Assert.IsTrue(progressEvents.Count > 0); + + // In concurrent mode, CurrentFile should be null + foreach (var evt in progressEvents) + { + Assert.IsNull(evt.CurrentFile, "CurrentFile should be null in concurrent mode"); + Assert.AreEqual(0, evt.TransferredBytesForCurrentFile); + Assert.AreEqual(0, evt.TotalNumberOfBytesForCurrentFile); + } + } + + #endregion + + #region Multipart Upload Tests + + [TestMethod] + [TestCategory("S3")] + [TestCategory("UploadDirectory")] + [TestCategory("Multipart")] + public async Task UploadDirectoryWithResponse_WithMultipartFiles_UploadsSuccessfully() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("multipart-directory"); + var uploadPath = Path.Combine(tempDirectory, keyPrefix + "-upload"); + var fileCount = 3; + + // Create directory with large files to trigger multipart (>16MB threshold) + CreateLocalTestDirectory(uploadPath, 20 * MB, fileCount); + + // Act + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityUploadDirectoryRequest + { + BucketName = bucketName, + Directory = uploadPath, + KeyPrefix = keyPrefix, + SearchPattern = "*", + SearchOption =
SearchOption.AllDirectories + }; + + var response = await transferUtility.UploadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual(fileCount, response.ObjectsUploaded); + Assert.AreEqual(0, response.ObjectsFailed); + Assert.AreEqual(DirectoryResult.Success, response.Result); + + // Verify all files uploaded with correct sizes + await VerifyObjectsInS3WithSize(keyPrefix, fileCount, 20 * MB); + } + + #endregion + + #region Nested Directory Tests + + [TestMethod] + [TestCategory("S3")] + [TestCategory("UploadDirectory")] + public async Task UploadDirectoryWithResponse_NestedDirectories_PreservesStructure() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("nested-structure"); + var uploadPath = Path.Combine(tempDirectory, keyPrefix + "-upload"); + + // Create nested directory structure + var nestedFiles = new Dictionary<string, long> + { + { "level1/file1.txt", 1 * MB }, + { "level1/level2/file2.txt", 2 * MB }, + { "level1/level2/level3/file3.txt", 3 * MB } + }; + + CreateLocalTestDirectoryWithStructure(uploadPath, nestedFiles); + + // Act + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityUploadDirectoryRequest + { + BucketName = bucketName, + Directory = uploadPath, + KeyPrefix = keyPrefix, + SearchPattern = "*", + SearchOption = SearchOption.AllDirectories + }; + + var response = await transferUtility.UploadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual(nestedFiles.Count, response.ObjectsUploaded); + Assert.AreEqual(0, response.ObjectsFailed); + + // Verify S3 keys have proper structure + foreach (var kvp in nestedFiles) + { + var expectedKey = keyPrefix + "/" + kvp.Key.Replace('\\', '/'); + await VerifyObjectExistsInS3(expectedKey, kvp.Value); + } + } + + #endregion + + #region Concurrent vs Sequential Tests + + [TestMethod] + [TestCategory("S3")] + [TestCategory("UploadDirectory")] + public async Task UploadDirectoryWithResponse_ConcurrentMode_UploadsAllFiles() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("concurrent-upload"); + var uploadPath = Path.Combine(tempDirectory, keyPrefix + "-upload"); + var fileCount = 10; + + CreateLocalTestDirectory(uploadPath, 2 * MB, fileCount); + + // Act + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityUploadDirectoryRequest + { + BucketName = bucketName, + Directory = uploadPath, + KeyPrefix = keyPrefix, + SearchPattern = "*", + SearchOption = SearchOption.AllDirectories, + UploadFilesConcurrently = true + }; + + var response = await transferUtility.UploadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual(fileCount, response.ObjectsUploaded); + Assert.AreEqual(0, response.ObjectsFailed); + + await VerifyObjectsInS3(keyPrefix, fileCount); + } + + [TestMethod] + [TestCategory("S3")] + [TestCategory("UploadDirectory")] + public async Task UploadDirectoryWithResponse_SequentialMode_UploadsAllFiles() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("sequential-upload"); + var uploadPath = Path.Combine(tempDirectory, keyPrefix + "-upload"); + var fileCount = 5; + + CreateLocalTestDirectory(uploadPath, 3 * MB, fileCount); + + // Act + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityUploadDirectoryRequest + { + BucketName = bucketName, + Directory = uploadPath, + KeyPrefix = keyPrefix, + SearchPattern = "*", + SearchOption = SearchOption.AllDirectories, +
UploadFilesConcurrently = false + }; + + var response = await transferUtility.UploadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual(fileCount, response.ObjectsUploaded); + Assert.AreEqual(0, response.ObjectsFailed); + + await VerifyObjectsInS3(keyPrefix, fileCount); + } + + #endregion + + #region Mixed File Size Tests + + [TestMethod] + [TestCategory("S3")] + [TestCategory("UploadDirectory")] + public async Task UploadDirectoryWithResponse_MixedFileSizes_UploadsAll() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("mixed-sizes"); + var uploadPath = Path.Combine(tempDirectory, keyPrefix + "-upload"); + + var mixedFiles = new Dictionary<string, long> + { + { "tiny.txt", 100 }, // 100 bytes + { "small.txt", 512 * KB }, // 512 KB + { "medium.txt", 5 * MB }, // 5 MB + { "large.txt", 20 * MB } // 20 MB (multipart) + }; + + CreateLocalTestDirectoryWithStructure(uploadPath, mixedFiles); + + // Act + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityUploadDirectoryRequest + { + BucketName = bucketName, + Directory = uploadPath, + KeyPrefix = keyPrefix, + SearchPattern = "*", + SearchOption = SearchOption.AllDirectories + }; + + var response = await transferUtility.UploadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual(mixedFiles.Count, response.ObjectsUploaded); + Assert.AreEqual(0, response.ObjectsFailed); + + // Verify each file's size in S3 + foreach (var kvp in mixedFiles) + { + var s3Key = keyPrefix + "/" + kvp.Key; + await VerifyObjectExistsInS3(s3Key, kvp.Value); + } + } + + #endregion + + #region Helper Methods + + /// <summary> + /// Creates a local test directory with specified number of files. + /// </summary> + private static void CreateLocalTestDirectory(string directoryPath, long fileSize, int fileCount) + { + Directory.CreateDirectory(directoryPath); + + for (int i = 0; i < fileCount; i++) + { + var fileName = $"file{i}.dat"; + var filePath = Path.Combine(directoryPath, fileName); + UtilityMethods.GenerateFile(filePath, fileSize); + } + } + + /// <summary> + /// Creates a local test directory with specific file structure. + /// </summary> + private static void CreateLocalTestDirectoryWithStructure(string directoryPath, Dictionary<string, long> files) + { + foreach (var kvp in files) + { + var filePath = Path.Combine(directoryPath, kvp.Key.Replace('/', Path.DirectorySeparatorChar)); + var directory = Path.GetDirectoryName(filePath); + + if (!string.IsNullOrEmpty(directory)) + { + Directory.CreateDirectory(directory); + } + + UtilityMethods.GenerateFile(filePath, kvp.Value); + } + } + + /// <summary> + /// Verifies that the expected number of objects exist in S3 under the given prefix. + /// </summary> + private static async Task VerifyObjectsInS3(string keyPrefix, int expectedCount) + { + var listRequest = new ListObjectsV2Request + { + BucketName = bucketName, + Prefix = keyPrefix + "/" + }; + + var listResponse = await Client.ListObjectsV2Async(listRequest); + + // Filter out directory markers + var actualObjects = listResponse.S3Objects + .Where(s3o => !s3o.Key.EndsWith("/", StringComparison.Ordinal)) + .ToList(); + + Assert.AreEqual(expectedCount, actualObjects.Count, + $"Expected {expectedCount} objects in S3 under prefix '{keyPrefix}', found {actualObjects.Count}"); + } + + /// <summary> + /// Verifies that the expected number of objects exist in S3 with the specified size.
+ /// </summary> + private static async Task VerifyObjectsInS3WithSize(string keyPrefix, int expectedCount, long expectedSize) + { + var listRequest = new ListObjectsV2Request + { + BucketName = bucketName, + Prefix = keyPrefix + "/" + }; + + var listResponse = await Client.ListObjectsV2Async(listRequest); + + var actualObjects = listResponse.S3Objects + .Where(s3o => !s3o.Key.EndsWith("/", StringComparison.Ordinal)) + .ToList(); + + Assert.AreEqual(expectedCount, actualObjects.Count); + + foreach (var s3Object in actualObjects) + { + Assert.AreEqual(expectedSize, s3Object.Size, + $"Object {s3Object.Key} should be {expectedSize} bytes"); + } + } + + /// <summary> + /// Verifies that a specific object exists in S3 with the expected size. + /// </summary> + private static async Task VerifyObjectExistsInS3(string key, long expectedSize) + { + var getRequest = new GetObjectMetadataRequest + { + BucketName = bucketName, + Key = key + }; + + var metadata = await Client.GetObjectMetadataAsync(getRequest); + + Assert.IsNotNull(metadata, $"Object should exist in S3: {key}"); + Assert.AreEqual(expectedSize, metadata.ContentLength, + $"Object {key} should be {expectedSize} bytes"); + } + + #endregion + } +} diff --git a/sdk/test/Services/S3/UnitTests/Custom/AtomicFileHandlerTests.cs b/sdk/test/Services/S3/UnitTests/Custom/AtomicFileHandlerTests.cs new file mode 100644 index 000000000000..89ce344513a6 --- /dev/null +++ b/sdk/test/Services/S3/UnitTests/Custom/AtomicFileHandlerTests.cs @@ -0,0 +1,670 @@ +using Amazon.S3.Transfer.Internal; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using System; +using System.IO; +using System.Linq; +using System.Text.RegularExpressions; + +namespace AWSSDK.UnitTests +{ + [TestClass] + public class AtomicFileHandlerTests + { + private string _testDirectory; + + [TestInitialize] + public void Setup() + { + _testDirectory = MultipartDownloadTestHelpers.CreateTempDirectory(); + } + + [TestCleanup] + public void Cleanup() + { + MultipartDownloadTestHelpers.CleanupTempDirectory(_testDirectory); + } + + #region Constructor Tests + + [TestMethod] + public void Constructor_CreatesHandler() + { + // Act + var handler = new AtomicFileHandler(); + + // Assert + Assert.IsNotNull(handler); + } + + #endregion + + #region CreateTemporaryFile Tests + + [TestMethod] + public void CreateTemporaryFile_CreatesFileWithS3TmpPattern() + { + // Arrange + var handler = new AtomicFileHandler(); + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + + // Act + var tempPath = handler.CreateTemporaryFile(destinationPath); + + // Assert + Assert.IsTrue(tempPath.Contains(".s3tmp.")); + Assert.IsTrue(File.Exists(tempPath)); + } + + [TestMethod] + public void CreateTemporaryFile_Generates8CharacterUniqueId() + { + // Arrange + var handler = new AtomicFileHandler(); + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + + // Act + var tempPath = handler.CreateTemporaryFile(destinationPath); + + // Assert - Extract unique ID from pattern: {dest}.s3tmp.{8-char-id} + var match = Regex.Match(tempPath, @"\.s3tmp\.([A-Z2-7]{8})$"); + Assert.IsTrue(match.Success, $"Temp file path '{tempPath}' doesn't match expected pattern"); + Assert.AreEqual(8, match.Groups[1].Value.Length); + } + + [TestMethod] + public void CreateTemporaryFile_CreatesDirectoryIfDoesntExist() + { + // Arrange + var handler = new AtomicFileHandler(); + var nestedDir = Path.Combine(_testDirectory, "level1", "level2", "level3"); + var destinationPath = Path.Combine(nestedDir, "test.dat"); + + // Act + var tempPath =
handler.CreateTemporaryFile(destinationPath); + + // Assert + Assert.IsTrue(Directory.Exists(nestedDir)); + Assert.IsTrue(File.Exists(tempPath)); + } + + [TestMethod] + public void CreateTemporaryFile_GeneratesUniqueNamesOnCollision() + { + // Arrange + var handler1 = new AtomicFileHandler(); + var handler2 = new AtomicFileHandler(); + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + + // Act + var tempPath1 = handler1.CreateTemporaryFile(destinationPath); + var tempPath2 = handler2.CreateTemporaryFile(destinationPath); + + // Assert + Assert.AreNotEqual(tempPath1, tempPath2); + Assert.IsTrue(File.Exists(tempPath1)); + Assert.IsTrue(File.Exists(tempPath2)); + } + + [TestMethod] + public void CreateTemporaryFile_CreatesEmptyFile() + { + // Arrange + var handler = new AtomicFileHandler(); + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + + // Act + var tempPath = handler.CreateTemporaryFile(destinationPath); + + // Assert + var fileInfo = new FileInfo(tempPath); + Assert.AreEqual(0, fileInfo.Length); + } + + [TestMethod] + public void CreateTemporaryFile_ReturnsCorrectTempFilePath() + { + // Arrange + var handler = new AtomicFileHandler(); + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + + // Act + var tempPath = handler.CreateTemporaryFile(destinationPath); + + // Assert + Assert.IsTrue(tempPath.StartsWith(destinationPath)); + Assert.IsTrue(tempPath.Contains(".s3tmp.")); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentException))] + public void CreateTemporaryFile_WithNullDestinationPath_ThrowsArgumentException() + { + // Arrange + var handler = new AtomicFileHandler(); + + // Act + handler.CreateTemporaryFile(null); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentException))] + public void CreateTemporaryFile_WithEmptyDestinationPath_ThrowsArgumentException() + { + // Arrange + var handler = new AtomicFileHandler(); + + // Act + handler.CreateTemporaryFile(""); + } + + #endregion + + #region CreateTemporaryFile Tests - Path Handling + + [TestMethod] + public void CreateTemporaryFile_WithAbsolutePath_CreatesFile() + { + // Arrange + var handler = new AtomicFileHandler(); + var absolutePath = Path.Combine(_testDirectory, "absolute.dat"); + + // Act + var tempPath = handler.CreateTemporaryFile(absolutePath); + + // Assert + Assert.IsTrue(Path.IsPathRooted(tempPath)); + Assert.IsTrue(File.Exists(tempPath)); + } + + [TestMethod] + public void CreateTemporaryFile_WithRelativePath_CreatesFile() + { + // Arrange + var handler = new AtomicFileHandler(); + var relativePath = "relative.dat"; + + // Act + var tempPath = handler.CreateTemporaryFile(relativePath); + + // Assert + Assert.IsTrue(File.Exists(tempPath)); + } + + [TestMethod] + public void CreateTemporaryFile_WithSpecialCharactersInPath_CreatesFile() + { + // Arrange + var handler = new AtomicFileHandler(); + var specialPath = Path.Combine(_testDirectory, "test[1]@2024.dat"); + + // Act + var tempPath = handler.CreateTemporaryFile(specialPath); + + // Assert + Assert.IsTrue(File.Exists(tempPath)); + } + + [TestMethod] + public void CreateTemporaryFile_WithDeepDirectoryStructure_CreatesAllNestedDirectories() + { + // Arrange + var handler = new AtomicFileHandler(); + var deepPath = Path.Combine(_testDirectory, "a", "b", "c", "d", "e", "test.dat"); + + // Act + var tempPath = handler.CreateTemporaryFile(deepPath); + + // Assert + Assert.IsTrue(Directory.Exists(Path.GetDirectoryName(deepPath))); + Assert.IsTrue(File.Exists(tempPath)); + } + + #endregion + + #region 
CommitFile Tests + + [TestMethod] + public void CommitFile_MovesTempFileToDestination() + { + // Arrange + var handler = new AtomicFileHandler(); + var destinationPath = Path.Combine(_testDirectory, "final.dat"); + var tempPath = handler.CreateTemporaryFile(destinationPath); + + // Write some data to temp file + File.WriteAllText(tempPath, "test content"); + + // Act + handler.CommitFile(tempPath, destinationPath); + + // Assert + Assert.IsTrue(File.Exists(destinationPath)); + Assert.IsFalse(File.Exists(tempPath)); + Assert.AreEqual("test content", File.ReadAllText(destinationPath)); + } + + [TestMethod] + public void CommitFile_OverwritesExistingDestination() + { + // Arrange + var handler = new AtomicFileHandler(); + var destinationPath = Path.Combine(_testDirectory, "existing.dat"); + + // Create existing file + File.WriteAllText(destinationPath, "old content"); + + var tempPath = handler.CreateTemporaryFile(destinationPath); + File.WriteAllText(tempPath, "new content"); + + // Act + handler.CommitFile(tempPath, destinationPath); + + // Assert + Assert.IsTrue(File.Exists(destinationPath)); + Assert.AreEqual("new content", File.ReadAllText(destinationPath)); + } + + [TestMethod] + public void CommitFile_ClearsInternalTempFilePath() + { + // Arrange + var handler = new AtomicFileHandler(); + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var tempPath = handler.CreateTemporaryFile(destinationPath); + File.WriteAllText(tempPath, "content"); + + // Act + handler.CommitFile(tempPath, destinationPath); + handler.Dispose(); // Should not try to cleanup already-committed file + + // Assert + Assert.IsTrue(File.Exists(destinationPath)); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentException))] + public void CommitFile_WithNullTempPath_ThrowsArgumentException() + { + // Arrange + var handler = new AtomicFileHandler(); + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + + // Act + handler.CommitFile(null, destinationPath); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentException))] + public void CommitFile_WithNullDestinationPath_ThrowsArgumentException() + { + // Arrange + var handler = new AtomicFileHandler(); + var tempPath = Path.Combine(_testDirectory, "temp.dat"); + File.Create(tempPath).Dispose(); + + // Act + handler.CommitFile(tempPath, null); + } + + [TestMethod] + [ExpectedException(typeof(FileNotFoundException))] + public void CommitFile_WithMissingTempFile_ThrowsFileNotFoundException() + { + // Arrange + var handler = new AtomicFileHandler(); + var tempPath = Path.Combine(_testDirectory, "nonexistent.s3tmp.ABCD1234"); + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + + // Act + handler.CommitFile(tempPath, destinationPath); + } + + [TestMethod] + public void CommitFile_ToSameDirectory_Succeeds() + { + // Arrange + var handler = new AtomicFileHandler(); + var destinationPath = Path.Combine(_testDirectory, "file.dat"); + var tempPath = handler.CreateTemporaryFile(destinationPath); + File.WriteAllText(tempPath, "content"); + + // Act + handler.CommitFile(tempPath, destinationPath); + + // Assert + Assert.IsTrue(File.Exists(destinationPath)); + Assert.IsFalse(File.Exists(tempPath)); + } + + [TestMethod] + public void CommitFile_ToDifferentDirectory_Succeeds() + { + // Arrange + var handler = new AtomicFileHandler(); + var tempDir = Path.Combine(_testDirectory, "temp"); + var finalDir = Path.Combine(_testDirectory, "final"); + Directory.CreateDirectory(tempDir); + Directory.CreateDirectory(finalDir); + + var tempPath 
= Path.Combine(tempDir, "file.s3tmp.ABCD1234"); + var destinationPath = Path.Combine(finalDir, "file.dat"); + File.WriteAllText(tempPath, "content"); + + // Act + handler.CommitFile(tempPath, destinationPath); + + // Assert + Assert.IsTrue(File.Exists(destinationPath)); + Assert.IsFalse(File.Exists(tempPath)); + } + + #endregion + + #region CleanupOnFailure Tests + + [TestMethod] + public void CleanupOnFailure_DeletesTempFile() + { + // Arrange + var handler = new AtomicFileHandler(); + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var tempPath = handler.CreateTemporaryFile(destinationPath); + + // Act + handler.CleanupOnFailure(); + + // Assert + Assert.IsFalse(File.Exists(tempPath)); + } + + [TestMethod] + public void CleanupOnFailure_WithExplicitPath_DeletesSpecifiedFile() + { + // Arrange + var handler = new AtomicFileHandler(); + var tempPath = Path.Combine(_testDirectory, "explicit.s3tmp.ABCD1234"); + File.Create(tempPath).Dispose(); + + // Act + handler.CleanupOnFailure(tempPath); + + // Assert + Assert.IsFalse(File.Exists(tempPath)); + } + + [TestMethod] + public void CleanupOnFailure_WithMissingFile_DoesNotThrow() + { + // Arrange + var handler = new AtomicFileHandler(); + + // Act & Assert - Should not throw + handler.CleanupOnFailure(); + } + + [TestMethod] + public void CleanupOnFailure_ClearsInternalTempFilePath() + { + // Arrange + var handler = new AtomicFileHandler(); + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + handler.CreateTemporaryFile(destinationPath); + + // Act + handler.CleanupOnFailure(); + handler.CleanupOnFailure(); // Second call should be safe + + // Assert - No exception thrown + } + + [TestMethod] + public void CleanupOnFailure_CanBeCalledMultipleTimes() + { + // Arrange + var handler = new AtomicFileHandler(); + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + handler.CreateTemporaryFile(destinationPath); + + // Act & Assert - Should not throw + handler.CleanupOnFailure(); + handler.CleanupOnFailure(); + handler.CleanupOnFailure(); + } + + [TestMethod] + public void CleanupOnFailure_WithNullPath_UsesInternalPath() + { + // Arrange + var handler = new AtomicFileHandler(); + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var tempPath = handler.CreateTemporaryFile(destinationPath); + + // Act + handler.CleanupOnFailure(null); + + // Assert + Assert.IsFalse(File.Exists(tempPath)); + } + + [TestMethod] + public void CleanupOnFailure_WithEmptyPath_UsesInternalPath() + { + // Arrange + var handler = new AtomicFileHandler(); + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var tempPath = handler.CreateTemporaryFile(destinationPath); + + // Act + handler.CleanupOnFailure(""); + + // Assert + Assert.IsFalse(File.Exists(tempPath)); + } + + #endregion + + #region GenerateRandomId Tests + + [TestMethod] + public void GenerateRandomId_GeneratesDifferentIdsOnSuccessiveCalls() + { + // Arrange + var handler = new AtomicFileHandler(); + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + + // Act - Create multiple temp files to generate multiple IDs + var tempPath1 = handler.CreateTemporaryFile(destinationPath); + var handler2 = new AtomicFileHandler(); + var tempPath2 = handler2.CreateTemporaryFile(destinationPath); + var handler3 = new AtomicFileHandler(); + var tempPath3 = handler3.CreateTemporaryFile(destinationPath); + + // Extract IDs + var id1 = Regex.Match(tempPath1, @"\.s3tmp\.([A-Z2-7]{8})$").Groups[1].Value; + var id2 = Regex.Match(tempPath2, 
@"\.s3tmp\.([A-Z2-7]{8})$").Groups[1].Value; + var id3 = Regex.Match(tempPath3, @"\.s3tmp\.([A-Z2-7]{8})$").Groups[1].Value; + + // Assert - IDs should be different (statistically) + Assert.IsFalse(id1 == id2 && id2 == id3, "All three IDs are identical - very unlikely with proper random generation"); + } + + [TestMethod] + public void GenerateRandomId_UsesBase32CharacterSet() + { + // Arrange + var handler = new AtomicFileHandler(); + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + + // Act + var tempPath = handler.CreateTemporaryFile(destinationPath); + var match = Regex.Match(tempPath, @"\.s3tmp\.([A-Z2-7]{8})$"); + var id = match.Groups[1].Value; + + // Assert - Should only contain A-Z and 2-7 (RFC 4648 base32) + Assert.IsTrue(Regex.IsMatch(id, "^[A-Z2-7]+$"), $"ID '{id}' contains invalid base32 characters"); + } + + [TestMethod] + public void GenerateRandomId_IdsAreFilesystemSafe() + { + // Arrange + var handler = new AtomicFileHandler(); + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + + // Act + var tempPath = handler.CreateTemporaryFile(destinationPath); + var match = Regex.Match(tempPath, @"\.s3tmp\.([A-Z2-7]{8})$"); + var id = match.Groups[1].Value; + + // Assert - No problematic characters (/, \, :, *, ?, ", <, >, |) + var problematicChars = new[] { '/', '\\', ':', '*', '?', '"', '<', '>', '|' }; + Assert.IsFalse(id.Any(c => problematicChars.Contains(c))); + } + + #endregion + + #region Dispose Tests + + [TestMethod] + public void Dispose_CallsCleanupOnUncommittedFile() + { + // Arrange + var handler = new AtomicFileHandler(); + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var tempPath = handler.CreateTemporaryFile(destinationPath); + + // Act + handler.Dispose(); + + // Assert + Assert.IsFalse(File.Exists(tempPath)); + } + + [TestMethod] + public void Dispose_CanBeCalledMultipleTimes() + { + // Arrange + var handler = new AtomicFileHandler(); + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + handler.CreateTemporaryFile(destinationPath); + + // Act & Assert - Should not throw + handler.Dispose(); + handler.Dispose(); + handler.Dispose(); + } + + [TestMethod] + public void Dispose_DoesNotCleanupCommittedFiles() + { + // Arrange + var handler = new AtomicFileHandler(); + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var tempPath = handler.CreateTemporaryFile(destinationPath); + File.WriteAllText(tempPath, "content"); + handler.CommitFile(tempPath, destinationPath); + + // Act + handler.Dispose(); + + // Assert + Assert.IsTrue(File.Exists(destinationPath)); + Assert.AreEqual("content", File.ReadAllText(destinationPath)); + } + + [TestMethod] + public void Dispose_SafeAfterManualCleanup() + { + // Arrange + var handler = new AtomicFileHandler(); + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + handler.CreateTemporaryFile(destinationPath); + handler.CleanupOnFailure(); + + // Act & Assert - Should not throw + handler.Dispose(); + } + + #endregion + + #region Integration Tests + + [TestMethod] + public void Integration_CreateWriteCommit_Success() + { + // Arrange + var handler = new AtomicFileHandler(); + var destinationPath = Path.Combine(_testDirectory, "final.dat"); + var testData = "Integration test content"; + + // Act + var tempPath = handler.CreateTemporaryFile(destinationPath); + File.WriteAllText(tempPath, testData); + handler.CommitFile(tempPath, destinationPath); + + // Assert + Assert.IsTrue(File.Exists(destinationPath)); + 
Assert.IsFalse(File.Exists(tempPath)); + Assert.AreEqual(testData, File.ReadAllText(destinationPath)); + } + + [TestMethod] + public void Integration_CreateWriteCleanup_Success() + { + // Arrange + var handler = new AtomicFileHandler(); + var destinationPath = Path.Combine(_testDirectory, "temp.dat"); + + // Act + var tempPath = handler.CreateTemporaryFile(destinationPath); + File.WriteAllText(tempPath, "temporary content"); + handler.CleanupOnFailure(); + + // Assert + Assert.IsFalse(File.Exists(tempPath)); + Assert.IsFalse(File.Exists(destinationPath)); + } + + [TestMethod] + public void Integration_ConcurrentTempFileCreation_AllFilesUnique() + { + // Arrange + var handlers = Enumerable.Range(0, 10).Select(_ => new AtomicFileHandler()).ToArray(); + var destinationPath = Path.Combine(_testDirectory, "concurrent.dat"); + + // Act + var tempPaths = handlers.Select(h => h.CreateTemporaryFile(destinationPath)).ToArray(); + + // Assert - All paths should be unique + Assert.AreEqual(10, tempPaths.Distinct().Count()); + Assert.IsTrue(tempPaths.All(File.Exists)); + + // Cleanup + foreach (var handler in handlers) + { + handler.Dispose(); + } + } + + [TestMethod] + public void Integration_VerifyFileAtomicity_NoPartialWrites() + { + // Arrange + var handler = new AtomicFileHandler(); + var destinationPath = Path.Combine(_testDirectory, "atomic.dat"); + var largeData = new string('A', 1024 * 1024); // 1MB of data + + // Act + var tempPath = handler.CreateTemporaryFile(destinationPath); + File.WriteAllText(tempPath, largeData); + handler.CommitFile(tempPath, destinationPath); + + // Assert + var finalContent = File.ReadAllText(destinationPath); + Assert.AreEqual(largeData.Length, finalContent.Length); + Assert.IsTrue(finalContent.All(c => c == 'A')); + } + + #endregion + } +} diff --git a/sdk/test/Services/S3/UnitTests/Custom/BufferedDataSourceTests.cs b/sdk/test/Services/S3/UnitTests/Custom/BufferedDataSourceTests.cs new file mode 100644 index 000000000000..24b25b97ea93 --- /dev/null +++ b/sdk/test/Services/S3/UnitTests/Custom/BufferedDataSourceTests.cs @@ -0,0 +1,487 @@ +using Amazon.S3.Transfer.Internal; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using System; +using System.Buffers; +using System.Threading; +using System.Threading.Tasks; + +namespace AWSSDK.UnitTests +{ + /// <summary> + /// Unit tests for BufferedDataSource class. + /// Tests reading from pre-buffered StreamPartBuffer data.
+ /// </summary> + [TestClass] + public class BufferedDataSourceTests + { + #region Constructor Tests + + [TestMethod] + public void Constructor_WithValidPartBuffer_CreatesDataSource() + { + // Arrange + byte[] testBuffer = ArrayPool<byte>.Shared.Rent(1024); + var partBuffer = new StreamPartBuffer(1, testBuffer, 512); + + // Act + var dataSource = new BufferedDataSource(partBuffer); + + // Assert + Assert.IsNotNull(dataSource); + Assert.AreEqual(1, dataSource.PartNumber); + Assert.IsFalse(dataSource.IsComplete); + + // Cleanup + dataSource.Dispose(); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentNullException))] + public void Constructor_WithNullPartBuffer_ThrowsArgumentNullException() + { + // Act + var dataSource = new BufferedDataSource(null); + + // Assert - ExpectedException + } + + #endregion + + #region Property Tests + + [TestMethod] + public void PartNumber_ReturnsPartBufferPartNumber() + { + // Arrange + byte[] testBuffer = ArrayPool<byte>.Shared.Rent(1024); + var partBuffer = new StreamPartBuffer(5, testBuffer, 512); + var dataSource = new BufferedDataSource(partBuffer); + + try + { + // Act & Assert + Assert.AreEqual(5, dataSource.PartNumber); + } + finally + { + dataSource.Dispose(); + } + } + + [TestMethod] + public void IsComplete_WhenNoRemainingBytes_ReturnsTrue() + { + // Arrange + byte[] testBuffer = ArrayPool<byte>.Shared.Rent(1024); + var partBuffer = new StreamPartBuffer(1, testBuffer, 512); + partBuffer.CurrentPosition = 512; // Move to end + var dataSource = new BufferedDataSource(partBuffer); + + try + { + // Act & Assert + Assert.IsTrue(dataSource.IsComplete); + } + finally + { + dataSource.Dispose(); + } + } + + [TestMethod] + public void IsComplete_WhenRemainingBytes_ReturnsFalse() + { + // Arrange + byte[] testBuffer = ArrayPool<byte>.Shared.Rent(1024); + var partBuffer = new StreamPartBuffer(1, testBuffer, 512); + var dataSource = new BufferedDataSource(partBuffer); + + try + { + // Act & Assert + Assert.IsFalse(dataSource.IsComplete); + } + finally + { + dataSource.Dispose(); + } + } + + #endregion + + #region ReadAsync Tests - Happy Path + + [TestMethod] + public async Task ReadAsync_ReadsDataFromPartBuffer() + { + // Arrange + byte[] testData = MultipartDownloadTestHelpers.GenerateTestData(512, 0); + byte[] testBuffer = ArrayPool<byte>.Shared.Rent(1024); + Buffer.BlockCopy(testData, 0, testBuffer, 0, 512); + + var partBuffer = new StreamPartBuffer(1, testBuffer, 512); + var dataSource = new BufferedDataSource(partBuffer); + + byte[] readBuffer = new byte[512]; + + try + { + // Act + int bytesRead = await dataSource.ReadAsync(readBuffer, 0, 512, CancellationToken.None); + + // Assert + Assert.AreEqual(512, bytesRead); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch(testData, readBuffer, 0, 512)); + Assert.IsTrue(dataSource.IsComplete); + } + finally + { + dataSource.Dispose(); + } + } + + [TestMethod] + public async Task ReadAsync_WithPartialRead_ReturnsRequestedBytes() + { + // Arrange + byte[] testData = MultipartDownloadTestHelpers.GenerateTestData(512, 0); + byte[] testBuffer = ArrayPool<byte>.Shared.Rent(1024); + Buffer.BlockCopy(testData, 0, testBuffer, 0, 512); + + var partBuffer = new StreamPartBuffer(1, testBuffer, 512); + var dataSource = new BufferedDataSource(partBuffer); + + byte[] readBuffer = new byte[256]; + + try + { + // Act + int bytesRead = await dataSource.ReadAsync(readBuffer, 0, 256, CancellationToken.None); + + // Assert + Assert.AreEqual(256, bytesRead); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch(testData, readBuffer, 0, 256)); +
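+ // Only 256 of the 512 buffered bytes have been consumed, so the data source must still report more data available: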
Assert.IsFalse(dataSource.IsComplete); + } + finally + { + dataSource.Dispose(); + } + } + + [TestMethod] + public async Task ReadAsync_WithFullRead_ReadsAllRemainingBytes() + { + // Arrange + byte[] testData = MultipartDownloadTestHelpers.GenerateTestData(512, 0); + byte[] testBuffer = ArrayPool.Shared.Rent(1024); + Buffer.BlockCopy(testData, 0, testBuffer, 0, 512); + + var partBuffer = new StreamPartBuffer(1, testBuffer, 512); + var dataSource = new BufferedDataSource(partBuffer); + + byte[] readBuffer = new byte[1024]; // Larger than available + + try + { + // Act + int bytesRead = await dataSource.ReadAsync(readBuffer, 0, 1024, CancellationToken.None); + + // Assert + Assert.AreEqual(512, bytesRead); // Only 512 available + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch(testData, readBuffer, 0, 512)); + Assert.IsTrue(dataSource.IsComplete); + } + finally + { + dataSource.Dispose(); + } + } + + [TestMethod] + public async Task ReadAsync_WhenComplete_ReturnsZero() + { + // Arrange + byte[] testBuffer = ArrayPool.Shared.Rent(1024); + var partBuffer = new StreamPartBuffer(1, testBuffer, 512); + partBuffer.CurrentPosition = 512; // Move to end + var dataSource = new BufferedDataSource(partBuffer); + + byte[] readBuffer = new byte[256]; + + try + { + // Act + int bytesRead = await dataSource.ReadAsync(readBuffer, 0, 256, CancellationToken.None); + + // Assert + Assert.AreEqual(0, bytesRead); + Assert.IsTrue(dataSource.IsComplete); + } + finally + { + dataSource.Dispose(); + } + } + + #endregion + + #region ReadAsync Tests - Parameter Validation + + [TestMethod] + [ExpectedException(typeof(ArgumentNullException))] + public async Task ReadAsync_WithNullBuffer_ThrowsArgumentNullException() + { + // Arrange + byte[] testBuffer = ArrayPool.Shared.Rent(1024); + var partBuffer = new StreamPartBuffer(1, testBuffer, 512); + var dataSource = new BufferedDataSource(partBuffer); + + try + { + // Act + await dataSource.ReadAsync(null, 0, 100, CancellationToken.None); + + // Assert - ExpectedException + } + finally + { + dataSource.Dispose(); + } + } + + [TestMethod] + [ExpectedException(typeof(ArgumentOutOfRangeException))] + public async Task ReadAsync_WithNegativeOffset_ThrowsArgumentOutOfRangeException() + { + // Arrange + byte[] testBuffer = ArrayPool.Shared.Rent(1024); + var partBuffer = new StreamPartBuffer(1, testBuffer, 512); + var dataSource = new BufferedDataSource(partBuffer); + byte[] readBuffer = new byte[256]; + + try + { + // Act + await dataSource.ReadAsync(readBuffer, -1, 100, CancellationToken.None); + + // Assert - ExpectedException + } + finally + { + dataSource.Dispose(); + } + } + + [TestMethod] + [ExpectedException(typeof(ArgumentOutOfRangeException))] + public async Task ReadAsync_WithNegativeCount_ThrowsArgumentOutOfRangeException() + { + // Arrange + byte[] testBuffer = ArrayPool.Shared.Rent(1024); + var partBuffer = new StreamPartBuffer(1, testBuffer, 512); + var dataSource = new BufferedDataSource(partBuffer); + byte[] readBuffer = new byte[256]; + + try + { + // Act + await dataSource.ReadAsync(readBuffer, 0, -1, CancellationToken.None); + + // Assert - ExpectedException + } + finally + { + dataSource.Dispose(); + } + } + + [TestMethod] + [ExpectedException(typeof(ArgumentException))] + public async Task ReadAsync_WithOffsetCountExceedingBounds_ThrowsArgumentException() + { + // Arrange + byte[] testBuffer = ArrayPool.Shared.Rent(1024); + var partBuffer = new StreamPartBuffer(1, testBuffer, 512); + var dataSource = new BufferedDataSource(partBuffer); + byte[] 
readBuffer = new byte[256]; + + try + { + // Act - offset + count > buffer.Length + await dataSource.ReadAsync(readBuffer, 100, 200, CancellationToken.None); + + // Assert - ExpectedException + } + finally + { + dataSource.Dispose(); + } + } + + #endregion + + #region ReadAsync Tests - Multiple Reads + + [TestMethod] + public async Task ReadAsync_MultipleReads_ConsumesAllData() + { + // Arrange + byte[] testData = MultipartDownloadTestHelpers.GenerateTestData(512, 0); + byte[] testBuffer = ArrayPool.Shared.Rent(1024); + Buffer.BlockCopy(testData, 0, testBuffer, 0, 512); + + var partBuffer = new StreamPartBuffer(1, testBuffer, 512); + var dataSource = new BufferedDataSource(partBuffer); + + byte[] readBuffer1 = new byte[256]; + byte[] readBuffer2 = new byte[256]; + + try + { + // Act - Read in two chunks + int bytesRead1 = await dataSource.ReadAsync(readBuffer1, 0, 256, CancellationToken.None); + int bytesRead2 = await dataSource.ReadAsync(readBuffer2, 0, 256, CancellationToken.None); + + // Assert + Assert.AreEqual(256, bytesRead1); + Assert.AreEqual(256, bytesRead2); + Assert.IsTrue(dataSource.IsComplete); + + // Verify data correctness + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch(testData, readBuffer1, 0, 256)); + + // Extract second segment manually for .NET Framework compatibility + byte[] secondSegment = new byte[256]; + Buffer.BlockCopy(testData, 256, secondSegment, 0, 256); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch( + secondSegment, + readBuffer2, + 0, + 256)); + } + finally + { + dataSource.Dispose(); + } + } + + [TestMethod] + public async Task ReadAsync_ReadingToEnd_ReturnsZeroOnSubsequentReads() + { + // Arrange + byte[] testBuffer = ArrayPool.Shared.Rent(1024); + var partBuffer = new StreamPartBuffer(1, testBuffer, 512); + var dataSource = new BufferedDataSource(partBuffer); + + byte[] readBuffer = new byte[512]; + + try + { + // Act - Read all data + int bytesRead1 = await dataSource.ReadAsync(readBuffer, 0, 512, CancellationToken.None); + + // Try to read again + int bytesRead2 = await dataSource.ReadAsync(readBuffer, 0, 512, CancellationToken.None); + + // Assert + Assert.AreEqual(512, bytesRead1); + Assert.AreEqual(0, bytesRead2); + Assert.IsTrue(dataSource.IsComplete); + } + finally + { + dataSource.Dispose(); + } + } + + #endregion + + #region Error Handling Tests + + [TestMethod] + public async Task ReadAsync_WhenExceptionDuringRead_MarksBufferConsumed() + { + // Arrange + byte[] testBuffer = ArrayPool.Shared.Rent(1024); + var partBuffer = new StreamPartBuffer(1, testBuffer, 512); + var dataSource = new BufferedDataSource(partBuffer); + + // Create a buffer that will cause BlockCopy to fail (null destination) + byte[] readBuffer = null; + + try + { + // Act & Assert - Should throw ArgumentNullException + await Assert.ThrowsExceptionAsync(async () => + { + await dataSource.ReadAsync(readBuffer, 0, 512, CancellationToken.None); + }); + + // Verify buffer was marked as consumed (position set to Length) + Assert.IsTrue(dataSource.IsComplete); + } + finally + { + dataSource.Dispose(); + } + } + + #endregion + + #region Disposal Tests + + [TestMethod] + public void Dispose_DisposesUnderlyingPartBuffer() + { + // Arrange + byte[] testBuffer = ArrayPool.Shared.Rent(1024); + var partBuffer = new StreamPartBuffer(1, testBuffer, 512); + var dataSource = new BufferedDataSource(partBuffer); + + // Act + dataSource.Dispose(); + + // Assert - The underlying part buffer should be disposed (ArrayPoolBuffer nulled) + 
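+            // Assumption (not asserted directly here): StreamPartBuffer.Dispose is
+            // expected to return the rented array to ArrayPool<byte>.Shared and null
+            // out ArrayPoolBuffer, so a null property is used as the observable
+            // signal that the pooled buffer was released.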
+            Assert.IsNull(partBuffer.ArrayPoolBuffer);
+        }
+
+        [TestMethod]
+        public void Dispose_MultipleCalls_IsIdempotent()
+        {
+            // Arrange
+            byte[] testBuffer = ArrayPool<byte>.Shared.Rent(1024);
+            var partBuffer = new StreamPartBuffer(1, testBuffer, 512);
+            var dataSource = new BufferedDataSource(partBuffer);
+
+            // Act - Dispose multiple times
+            dataSource.Dispose();
+            dataSource.Dispose();
+            dataSource.Dispose();
+
+            // Assert - Should not throw
+            Assert.IsNull(partBuffer.ArrayPoolBuffer);
+        }
+
+        [TestMethod]
+        [ExpectedException(typeof(ObjectDisposedException))]
+        public async Task ReadAsync_AfterDispose_ThrowsObjectDisposedException()
+        {
+            // Arrange
+            byte[] testBuffer = ArrayPool<byte>.Shared.Rent(1024);
+            var partBuffer = new StreamPartBuffer(1, testBuffer, 512);
+            var dataSource = new BufferedDataSource(partBuffer);
+            byte[] readBuffer = new byte[256];
+
+            // Dispose the data source
+            dataSource.Dispose();
+
+            // Act - Try to read after disposal
+            await dataSource.ReadAsync(readBuffer, 0, 256, CancellationToken.None);
+
+            // Assert - ExpectedException
+        }
+
+        #endregion
+    }
+}
diff --git a/sdk/test/Services/S3/UnitTests/Custom/BufferedDownloadConfigurationTests.cs b/sdk/test/Services/S3/UnitTests/Custom/BufferedDownloadConfigurationTests.cs
new file mode 100644
index 000000000000..f173be94eb20
--- /dev/null
+++ b/sdk/test/Services/S3/UnitTests/Custom/BufferedDownloadConfigurationTests.cs
@@ -0,0 +1,151 @@
+using Amazon.S3.Transfer.Internal;
+using Microsoft.VisualStudio.TestTools.UnitTesting;
+using System;
+
+namespace AWSSDK.UnitTests
+{
+    /// <summary>
+    /// Unit tests for BufferedDownloadConfiguration class.
+    /// Tests configuration validation and parameter handling.
+    /// </summary>
+    [TestClass]
+    public class BufferedDownloadConfigurationTests
+    {
+        #region Constructor Tests
+
+        [TestMethod]
+        public void Constructor_WithValidParameters_CreatesConfiguration()
+        {
+            // Arrange
+            int concurrentRequests = 10;
+            int maxInMemoryParts = 5;
+            int bufferSize = 8192;
+            long targetPartSize = 8 * 1024 * 1024; // 8MB
+
+            // Act
+            var config = new BufferedDownloadConfiguration(concurrentRequests, maxInMemoryParts, bufferSize, targetPartSize);
+
+            // Assert
+            Assert.AreEqual(concurrentRequests, config.ConcurrentServiceRequests);
+            Assert.AreEqual(maxInMemoryParts, config.MaxInMemoryParts);
+            Assert.AreEqual(bufferSize, config.BufferSize);
+            Assert.AreEqual(targetPartSize, config.TargetPartSizeBytes);
+        }
+
+        [TestMethod]
+        public void Constructor_WithCustomPartSize_UsesProvidedValue()
+        {
+            // Arrange
+            long expectedPartSize = 10 * 1024 * 1024; // 10MB
+
+            // Act
+            var config = new BufferedDownloadConfiguration(10, 5, 8192, expectedPartSize);
+
+            // Assert
+            Assert.AreEqual(expectedPartSize, config.TargetPartSizeBytes);
+        }
+
+        [TestMethod]
+        public void Constructor_WithVeryLargeValues_HandlesCorrectly()
+        {
+            // Arrange
+            int largeValue = int.MaxValue;
+            long largePartSize = long.MaxValue / 2;
+
+            // Act
+            var config = new BufferedDownloadConfiguration(largeValue, largeValue, largeValue, largePartSize);
+
+            // Assert
+            Assert.AreEqual(largeValue, config.ConcurrentServiceRequests);
+            Assert.AreEqual(largeValue, config.MaxInMemoryParts);
+            Assert.AreEqual(largeValue, config.BufferSize);
+            Assert.AreEqual(largePartSize, config.TargetPartSizeBytes);
+        }
+
+        [TestMethod]
+        public void Constructor_With8MBPartSize_StoresCorrectValue()
+        {
+            // Arrange
+            long partSize = 8 * 1024 * 1024; // 8MB
+
+            // Act
+            var config = new BufferedDownloadConfiguration(1, 1, 1, partSize);
+
+            // Assert
+            Assert.AreEqual(1,
config.ConcurrentServiceRequests); + Assert.AreEqual(1, config.MaxInMemoryParts); + Assert.AreEqual(1, config.BufferSize); + Assert.AreEqual(partSize, config.TargetPartSizeBytes); + } + + #endregion + + #region Validation Tests + + [TestMethod] + [ExpectedException(typeof(ArgumentOutOfRangeException))] + public void Constructor_WithNegativeConcurrentRequests_ThrowsException() + { + // Act & Assert - ExpectedException + var config = new BufferedDownloadConfiguration(-1, 5, 8192, 8 * 1024 * 1024); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentOutOfRangeException))] + public void Constructor_WithZeroConcurrentRequests_ThrowsException() + { + // Act & Assert - ExpectedException + var config = new BufferedDownloadConfiguration(0, 5, 8192, 8 * 1024 * 1024); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentOutOfRangeException))] + public void Constructor_WithNegativeMaxInMemoryParts_ThrowsException() + { + // Act & Assert - ExpectedException + var config = new BufferedDownloadConfiguration(10, -1, 8192, 8 * 1024 * 1024); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentOutOfRangeException))] + public void Constructor_WithZeroMaxInMemoryParts_ThrowsException() + { + // Act & Assert - ExpectedException + var config = new BufferedDownloadConfiguration(10, 0, 8192, 8 * 1024 * 1024); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentOutOfRangeException))] + public void Constructor_WithNegativeTargetPartSize_ThrowsException() + { + // Act & Assert - ExpectedException + var config = new BufferedDownloadConfiguration(10, 5, 8192, -1L); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentOutOfRangeException))] + public void Constructor_WithZeroTargetPartSize_ThrowsException() + { + // Act & Assert - ExpectedException + var config = new BufferedDownloadConfiguration(10, 5, 8192, 0L); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentOutOfRangeException))] + public void Constructor_WithNegativeBufferSize_ThrowsException() + { + // Act & Assert - ExpectedException + var config = new BufferedDownloadConfiguration(10, 5, -1, 8 * 1024 * 1024); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentOutOfRangeException))] + public void Constructor_WithZeroBufferSize_ThrowsException() + { + // Act & Assert - ExpectedException + var config = new BufferedDownloadConfiguration(10, 5, 0, 8 * 1024 * 1024); + } + + #endregion + } +} diff --git a/sdk/test/Services/S3/UnitTests/Custom/BufferedMultipartStreamTests.cs b/sdk/test/Services/S3/UnitTests/Custom/BufferedMultipartStreamTests.cs new file mode 100644 index 000000000000..29f966f34569 --- /dev/null +++ b/sdk/test/Services/S3/UnitTests/Custom/BufferedMultipartStreamTests.cs @@ -0,0 +1,1401 @@ +using Amazon.S3; +using Amazon.S3.Model; +using Amazon.S3.Transfer; +using Amazon.S3.Transfer.Internal; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Moq; +using System; +using System.IO; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; + +namespace AWSSDK.UnitTests +{ + [TestClass] + public class BufferedMultipartStreamTests + { + #region Test Constants + + private const int EMPTY_OBJECT_SIZE = 0; + private const int SMALL_OBJECT_SIZE = 512; + private const int MEDIUM_OBJECT_SIZE = 1024; + private const int LARGE_OBJECT_SIZE = 4096; + private const int VERY_LARGE_OBJECT_SIZE = 50 * 1024 * 1024; + private const int DEFAULT_PART_SIZE = 5 * 1024 * 1024; + private const int SMALL_CHUNK_SIZE = 128; + private const int MEDIUM_CHUNK_SIZE = 256; + private const int LARGE_CHUNK_SIZE = 512; + + 
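+        // The object sizes above stay below DEFAULT_PART_SIZE (5 MB) for
+        // single-part scenarios, while VERY_LARGE_OBJECT_SIZE (50 MB) spans
+        // multiple parts; the chunk sizes are intentionally smaller than the
+        // objects so reads must take several calls to drain the stream.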
#endregion + + #region Test Setup Fields + + private Mock _mockCoordinator; + private Mock _mockBufferManager; + private BufferedDownloadConfiguration _config; + + #endregion + + #region Test Initialization + + [TestInitialize] + public void Setup() + { + _mockCoordinator = new Mock(); + _mockBufferManager = new Mock(); + _config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + } + + #endregion + + #region Helper Methods + + private BufferedMultipartStream CreateStream() + { + return new BufferedMultipartStream(_mockCoordinator.Object, _mockBufferManager.Object, _config); + } + + private async Task CreateInitializedStreamAsync( + long objectSize = MEDIUM_OBJECT_SIZE, + int totalParts = 1) + { + var mockResponse = totalParts == 1 + ? MultipartDownloadTestHelpers.CreateSinglePartResponse(objectSize) + : new GetObjectResponse(); + + var discoveryResult = new DownloadResult + { + TotalParts = totalParts, + ObjectSize = objectSize, + InitialResponse = mockResponse + }; + + _mockCoordinator.Setup(x => x.StartDownloadAsync( + It.IsAny>(), It.IsAny())) + .ReturnsAsync(discoveryResult); + + var stream = CreateStream(); + await stream.InitializeAsync(CancellationToken.None); + return stream; + } + + #endregion + + #region Constructor Tests + + [TestMethod] + public void Constructor_WithValidDependencies_CreatesStream() + { + // Act + var stream = CreateStream(); + + // Assert + Assert.IsNotNull(stream); + Assert.IsTrue(stream.CanRead); + Assert.IsFalse(stream.CanSeek); + Assert.IsFalse(stream.CanWrite); + } + + [DataTestMethod] + [DataRow(null, "bufferManager", "config", DisplayName = "Null Coordinator")] + [DataRow("coordinator", null, "config", DisplayName = "Null Buffer Manager")] + [DataRow("coordinator", "bufferManager", null, DisplayName = "Null Config")] + [ExpectedException(typeof(ArgumentNullException))] + public void Constructor_WithNullParameter_ThrowsArgumentNullException( + string coordinatorKey, string bufferManagerKey, string configKey) + { + // Arrange + var coordinator = coordinatorKey != null ? _mockCoordinator.Object : null; + var bufferManager = bufferManagerKey != null ? _mockBufferManager.Object : null; + var config = configKey != null ? _config : null; + + // Act + var stream = new BufferedMultipartStream(coordinator, bufferManager, config); + } + + #endregion + + #region Factory Method Tests + + [TestMethod] + public void Create_WithValidParameters_CreatesStream() + { + // Arrange + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var transferConfig = new TransferUtilityConfig(); + + // Act + var stream = BufferedMultipartStream.Create(mockClient.Object, request, transferConfig); + + // Assert + Assert.IsNotNull(stream); + Assert.IsNull(stream.DiscoveryResult); // Not initialized yet + } + + [DataTestMethod] + [DataRow(null, "request", "config", DisplayName = "Null S3 Client")] + [DataRow("client", null, "config", DisplayName = "Null Request")] + [DataRow("client", "request", null, DisplayName = "Null Transfer Config")] + [ExpectedException(typeof(ArgumentNullException))] + public void Create_WithNullParameter_ThrowsArgumentNullException( + string clientKey, string requestKey, string configKey) + { + // Arrange + var client = clientKey != null ? MultipartDownloadTestHelpers.CreateMockS3Client().Object : null; + var request = requestKey != null ? MultipartDownloadTestHelpers.CreateOpenStreamRequest() : null; + var config = configKey != null ? 
new TransferUtilityConfig() : null; + + // Act + var stream = BufferedMultipartStream.Create(client, request, config); + } + + #endregion + + #region InitializeAsync Tests - Single Part + + [TestMethod] + public async Task InitializeAsync_SinglePart_SetsCorrectDiscoveryResult() + { + // Arrange + var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse(1024); + var discoveryResult = new DownloadResult + { + TotalParts = 1, + ObjectSize = 1024, + InitialResponse = mockResponse + }; + + var mockCoordinator = new Mock(); + mockCoordinator.Setup(x => x.StartDownloadAsync( + It.IsAny>(), It.IsAny())) + .ReturnsAsync(discoveryResult); + + var mockBufferManager = new Mock(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var stream = new BufferedMultipartStream(mockCoordinator.Object, mockBufferManager.Object, config); + + // Act + await stream.InitializeAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(stream.DiscoveryResult); + Assert.AreEqual(1, stream.DiscoveryResult.TotalParts); + Assert.AreEqual(1024, stream.DiscoveryResult.ObjectSize); + } + + + [TestMethod] + public async Task InitializeAsync_Multipart_UsesMultipartHandler() + { + // Arrange + var discoveryResult = new DownloadResult + { + TotalParts = 5, + ObjectSize = 50 * 1024 * 1024, + InitialResponse = new GetObjectResponse() + }; + + var mockCoordinator = new Mock(); + mockCoordinator.Setup(x => x.StartDownloadAsync( + It.IsAny>(), It.IsAny())) + .ReturnsAsync(discoveryResult); + + var mockBufferManager = new Mock(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var stream = new BufferedMultipartStream(mockCoordinator.Object, mockBufferManager.Object, config); + + // Act + await stream.InitializeAsync(CancellationToken.None); + + // Assert + Assert.AreEqual(5, stream.DiscoveryResult.TotalParts); + } + + [TestMethod] + public async Task InitializeAsync_SinglePart_CallsStartDownloads() + { + // Arrange + var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse(1024); + var discoveryResult = new DownloadResult + { + TotalParts = 1, + ObjectSize = 1024, + InitialResponse = mockResponse + }; + + var mockCoordinator = new Mock(); + mockCoordinator.Setup(x => x.StartDownloadAsync( + It.IsAny>(), It.IsAny())) + .ReturnsAsync(discoveryResult); + + var mockBufferManager = new Mock(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var stream = new BufferedMultipartStream(mockCoordinator.Object, mockBufferManager.Object, config); + + // Act + await stream.InitializeAsync(CancellationToken.None); + + // Assert + mockCoordinator.Verify( + x => x.StartDownloadAsync( + It.IsAny>(), It.IsAny()), + Times.Once); + } + + #endregion + + #region InitializeAsync Tests - Multipart + + [TestMethod] + public async Task InitializeAsync_Multipart_StartsDownloads() + { + // Arrange + var discoveryResult = new DownloadResult + { + TotalParts = 5, + ObjectSize = 50 * 1024 * 1024, + InitialResponse = new GetObjectResponse() + }; + + var mockCoordinator = new Mock(); + mockCoordinator.Setup(x => x.StartDownloadAsync( + It.IsAny>(), It.IsAny())) + .ReturnsAsync(discoveryResult); + + var mockBufferManager = new Mock(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var stream = new BufferedMultipartStream(mockCoordinator.Object, mockBufferManager.Object, config); + + // Act + await stream.InitializeAsync(CancellationToken.None); + + // Assert + mockCoordinator.Verify( + x 
=> x.StartDownloadAsync(
+                    It.IsAny>(), It.IsAny<CancellationToken>()),
+                Times.Once);
+        }
+
+        #endregion
+
+        #region InitializeAsync Tests - State Management
+
+        [TestMethod]
+        public async Task InitializeAsync_SetsDiscoveryResult()
+        {
+            // Arrange
+            var discoveryResult = new DownloadResult
+            {
+                TotalParts = 1,
+                ObjectSize = 1024,
+                InitialResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse(1024)
+            };
+
+            var mockCoordinator = new Mock();
+            mockCoordinator.Setup(x => x.StartDownloadAsync(
+                It.IsAny>(), It.IsAny<CancellationToken>()))
+                .ReturnsAsync(discoveryResult);
+
+            var mockBufferManager = new Mock();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var stream = new BufferedMultipartStream(mockCoordinator.Object, mockBufferManager.Object, config);
+
+            // Act
+            await stream.InitializeAsync(CancellationToken.None);
+
+            // Assert
+            Assert.IsNotNull(stream.DiscoveryResult);
+            Assert.AreEqual(discoveryResult, stream.DiscoveryResult);
+        }
+
+        [TestMethod]
+        [ExpectedException(typeof(InvalidOperationException))]
+        public async Task InitializeAsync_CalledTwice_ThrowsInvalidOperationException()
+        {
+            // Arrange
+            var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse(1024);
+            var discoveryResult = new DownloadResult
+            {
+                TotalParts = 1,
+                ObjectSize = 1024,
+                InitialResponse = mockResponse
+            };
+
+            var mockCoordinator = new Mock();
+            mockCoordinator.Setup(x => x.StartDownloadAsync(
+                It.IsAny>(), It.IsAny<CancellationToken>()))
+                .ReturnsAsync(discoveryResult);
+
+            var mockBufferManager = new Mock();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var stream = new BufferedMultipartStream(mockCoordinator.Object, mockBufferManager.Object, config);
+
+            // Act
+            await stream.InitializeAsync(CancellationToken.None);
+            await stream.InitializeAsync(CancellationToken.None); // Second call should throw
+        }
+
+        #endregion
+
+        #region ReadAsync Tests
+
+        [TestMethod]
+        [ExpectedException(typeof(InvalidOperationException))]
+        public async Task ReadAsync_BeforeInitialize_ThrowsInvalidOperationException()
+        {
+            // Arrange
+            var mockCoordinator = new Mock();
+            var mockBufferManager = new Mock();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var stream = new BufferedMultipartStream(mockCoordinator.Object, mockBufferManager.Object, config);
+
+            var buffer = new byte[1024];
+
+            // Act
+            await stream.ReadAsync(buffer, 0, buffer.Length);
+        }
+
+        [TestMethod]
+        [ExpectedException(typeof(ObjectDisposedException))]
+        public async Task ReadAsync_AfterDispose_ThrowsObjectDisposedException()
+        {
+            // Arrange
+            var mockCoordinator = new Mock();
+            var mockBufferManager = new Mock();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var stream = new BufferedMultipartStream(mockCoordinator.Object, mockBufferManager.Object, config);
+
+            stream.Dispose();
+            var buffer = new byte[1024];
+
+            // Act
+            await stream.ReadAsync(buffer, 0, buffer.Length);
+        }
+
+        #endregion
+
+        #region ReadAsync Tests - Parameter Validation
+
+        [TestMethod]
+        public async Task ReadAsync_WithNullBuffer_ThrowsArgumentNullException()
+        {
+            // Arrange
+            var stream = await CreateInitializedStreamAsync();
+
+            // Act & Assert
+            await Assert.ThrowsExceptionAsync<ArgumentNullException>(
+                async () => await stream.ReadAsync(null, 0, MEDIUM_OBJECT_SIZE));
+
+            // Cleanup
+            stream.Dispose();
+        }
+
+        [TestMethod]
+        public async Task ReadAsync_WithNegativeOffset_ThrowsArgumentOutOfRangeException()
+        {
+            // Arrange
+            var stream = await CreateInitializedStreamAsync();
+            var buffer = new byte[MEDIUM_OBJECT_SIZE];
+
+            // Act & Assert
+            await Assert.ThrowsExceptionAsync<ArgumentOutOfRangeException>(
+                async () => await stream.ReadAsync(buffer, -1, MEDIUM_OBJECT_SIZE));
+
+            // Cleanup
+            stream.Dispose();
+        }
+
+        [TestMethod]
+        public async Task ReadAsync_WithNegativeCount_ThrowsArgumentOutOfRangeException()
+        {
+            // Arrange
+            var stream = await CreateInitializedStreamAsync();
+            var buffer = new byte[MEDIUM_OBJECT_SIZE];
+
+            // Act & Assert
+            await Assert.ThrowsExceptionAsync<ArgumentOutOfRangeException>(
+                async () => await stream.ReadAsync(buffer, 0, -1));
+
+            // Cleanup
+            stream.Dispose();
+        }
+
+        [TestMethod]
+        public async Task ReadAsync_WithOffsetCountExceedingBounds_ThrowsArgumentException()
+        {
+            // Arrange
+            var stream = await CreateInitializedStreamAsync();
+            var buffer = new byte[MEDIUM_OBJECT_SIZE];
+
+            // Act & Assert
+            await Assert.ThrowsExceptionAsync<ArgumentException>(
+                async () => await stream.ReadAsync(buffer, 100, 1000)); // 100 + 1000 > 1024
+
+            // Cleanup
+            stream.Dispose();
+        }
+
+        #endregion
+
+        #region Stream Property Tests
+
+        [TestMethod]
+        public void StreamCapabilities_HaveCorrectValues()
+        {
+            // Arrange
+            var stream = CreateStream();
+
+            // Act & Assert
+            Assert.IsTrue(stream.CanRead, "Stream should be readable");
+            Assert.IsFalse(stream.CanSeek, "Stream should not be seekable");
+            Assert.IsFalse(stream.CanWrite, "Stream should not be writable");
+        }
+
+        [TestMethod]
+        [ExpectedException(typeof(InvalidOperationException))]
+        public void Length_BeforeInitialization_ThrowsInvalidOperationException()
+        {
+            // Arrange
+            var stream = CreateStream();
+
+            // Act
+            _ = stream.Length;
+        }
+
+        [TestMethod]
+        public async Task Length_AfterInitialization_ReturnsObjectSize()
+        {
+            // Arrange
+            var objectSize = MEDIUM_OBJECT_SIZE;
+            var stream = await CreateInitializedStreamAsync(objectSize: objectSize);
+
+            // Act
+            var length = stream.Length;
+
+            // Assert
+            Assert.AreEqual(objectSize, length, "Length should return ObjectSize from discovery result");
+
+            // Cleanup
+            stream.Dispose();
+        }
+
+        [TestMethod]
+        public async Task Length_ForLargeObject_ReturnsCorrectSize()
+        {
+            // Arrange
+            var objectSize = VERY_LARGE_OBJECT_SIZE;
+            var stream = await CreateInitializedStreamAsync(objectSize: objectSize, totalParts: 10);
+
+            // Act
+            var length = stream.Length;
+
+            // Assert
+            Assert.AreEqual(objectSize, length, "Length should return correct size for large objects");
+
+            // Cleanup
+            stream.Dispose();
+        }
+
+        [TestMethod]
+        [ExpectedException(typeof(InvalidOperationException))]
+        public void Position_BeforeInitialization_ThrowsInvalidOperationException()
+        {
+            // Arrange
+            var stream = CreateStream();
+
+            // Act
+            _ = stream.Position;
+        }
+
+        [TestMethod]
+        public async Task Position_AfterInitialization_ReturnsZero()
+        {
+            // Arrange
+            var stream = await CreateInitializedStreamAsync();
+
+            // Act
+            var position = stream.Position;
+
+            // Assert
+            Assert.AreEqual(0, position, "Position should be 0 before any reads");
+
+            // Cleanup
+            stream.Dispose();
+        }
+
+        [TestMethod]
+        public async Task Position_AfterSingleRead_ReturnsCorrectValue()
+        {
+            // Arrange
+            var objectSize = MEDIUM_OBJECT_SIZE;
+            var testData = MultipartDownloadTestHelpers.GenerateTestData(objectSize, 0);
+            var mockResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse(
+                objectSize, null, null, "test-etag", testData);
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(
+                (req, ct) => Task.FromResult(mockResponse));
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var
transferConfig = new TransferUtilityConfig(); + var stream = BufferedMultipartStream.Create(mockClient.Object, request, transferConfig); + await stream.InitializeAsync(CancellationToken.None); + + // Act + var buffer = new byte[SMALL_CHUNK_SIZE]; + var bytesRead = await stream.ReadAsync(buffer, 0, buffer.Length); + var position = stream.Position; + + // Assert + Assert.AreEqual(bytesRead, position, "Position should equal bytes read"); + + // Cleanup + stream.Dispose(); + } + + [TestMethod] + public async Task Position_AfterMultipleReads_AccumulatesCorrectly() + { + // Arrange + var objectSize = MEDIUM_OBJECT_SIZE; + var testData = MultipartDownloadTestHelpers.GenerateTestData(objectSize, 0); + var mockResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse( + objectSize, null, null, "test-etag", testData); + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var transferConfig = new TransferUtilityConfig(); + var stream = BufferedMultipartStream.Create(mockClient.Object, request, transferConfig); + await stream.InitializeAsync(CancellationToken.None); + + // Act - Perform multiple reads + var buffer = new byte[SMALL_CHUNK_SIZE]; + var totalBytesRead = 0; + + var read1 = await stream.ReadAsync(buffer, 0, buffer.Length); + totalBytesRead += read1; + Assert.AreEqual(totalBytesRead, stream.Position, "Position should match after first read"); + + var read2 = await stream.ReadAsync(buffer, 0, buffer.Length); + totalBytesRead += read2; + Assert.AreEqual(totalBytesRead, stream.Position, "Position should accumulate after second read"); + + var read3 = await stream.ReadAsync(buffer, 0, buffer.Length); + totalBytesRead += read3; + Assert.AreEqual(totalBytesRead, stream.Position, "Position should accumulate after third read"); + + // Assert + Assert.IsTrue(totalBytesRead > 0, "Should have read some data"); + Assert.AreEqual(totalBytesRead, stream.Position, "Position should equal total bytes read"); + + // Cleanup + stream.Dispose(); + } + + [TestMethod] + public async Task Position_AtEndOfStream_EqualsLength() + { + // Arrange + var objectSize = SMALL_OBJECT_SIZE; + var testData = MultipartDownloadTestHelpers.GenerateTestData(objectSize, 0); + var mockResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse( + objectSize, null, null, "test-etag", testData); + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var transferConfig = new TransferUtilityConfig(); + var stream = BufferedMultipartStream.Create(mockClient.Object, request, transferConfig); + await stream.InitializeAsync(CancellationToken.None); + + // Act - Read entire stream + var buffer = new byte[objectSize]; + await stream.ReadAsync(buffer, 0, buffer.Length); + + // Assert + Assert.AreEqual(stream.Length, stream.Position, + "Position should equal Length after reading entire stream"); + + // Cleanup + stream.Dispose(); + } + + [TestMethod] + public async Task Position_WithZeroByteRead_DoesNotChange() + { + // Arrange + var objectSize = SMALL_OBJECT_SIZE; + var testData = MultipartDownloadTestHelpers.GenerateTestData(objectSize, 0); + var mockResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse( + objectSize, null, null, "test-etag", testData); + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => 
Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var transferConfig = new TransferUtilityConfig(); + var stream = BufferedMultipartStream.Create(mockClient.Object, request, transferConfig); + await stream.InitializeAsync(CancellationToken.None); + + // Act - Read entire stream, then try to read again + var buffer = new byte[objectSize]; + await stream.ReadAsync(buffer, 0, buffer.Length); + var positionAfterFullRead = stream.Position; + + // Try to read past end + await stream.ReadAsync(buffer, 0, buffer.Length); + var positionAfterSecondRead = stream.Position; + + // Assert + Assert.AreEqual(positionAfterFullRead, positionAfterSecondRead, + "Position should not change when read returns 0 bytes"); + + // Cleanup + stream.Dispose(); + } + + [TestMethod] + public async Task Position_SynchronousRead_UpdatesCorrectly() + { + // Arrange + var objectSize = MEDIUM_OBJECT_SIZE; + var testData = MultipartDownloadTestHelpers.GenerateTestData(objectSize, 0); + var mockResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse( + objectSize, null, null, "test-etag", testData); + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var transferConfig = new TransferUtilityConfig(); + var stream = BufferedMultipartStream.Create(mockClient.Object, request, transferConfig); + await stream.InitializeAsync(CancellationToken.None); + + // Act - Use synchronous Read method + var buffer = new byte[SMALL_CHUNK_SIZE]; + var bytesRead = stream.Read(buffer, 0, buffer.Length); + + // Assert + Assert.AreEqual(bytesRead, stream.Position, + "Position should update correctly for synchronous Read"); + + // Cleanup + stream.Dispose(); + } + + [TestMethod] + public async Task Position_LengthAndPosition_ProvideProgressTracking() + { + // Arrange + var objectSize = LARGE_OBJECT_SIZE; + var testData = MultipartDownloadTestHelpers.GenerateTestData(objectSize, 0); + var mockResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse( + objectSize, null, null, "test-etag", testData); + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var transferConfig = new TransferUtilityConfig(); + var stream = BufferedMultipartStream.Create(mockClient.Object, request, transferConfig); + await stream.InitializeAsync(CancellationToken.None); + + // Act & Assert - Verify progress calculation + var buffer = new byte[MEDIUM_CHUNK_SIZE]; + var totalBytesRead = 0; + + while (true) + { + var bytesRead = await stream.ReadAsync(buffer, 0, buffer.Length); + if (bytesRead == 0) break; + + totalBytesRead += bytesRead; + + // Verify progress can be calculated + var progressPercentage = (double)stream.Position / stream.Length * 100; + Assert.IsTrue(progressPercentage >= 0 && progressPercentage <= 100, + "Progress percentage should be between 0 and 100"); + Assert.AreEqual(totalBytesRead, stream.Position, + "Position should track total bytes read"); + } + + // Final verification + Assert.AreEqual(objectSize, totalBytesRead, "Should read entire object"); + Assert.AreEqual(100.0, (double)stream.Position / stream.Length * 100, + "Progress should be 100% at completion"); + + // Cleanup + stream.Dispose(); + } + + [TestMethod] + [ExpectedException(typeof(NotSupportedException))] + public async Task 
Position_Setter_ThrowsNotSupportedException()
+        {
+            // Arrange
+            var stream = await CreateInitializedStreamAsync();
+
+            // Act
+            stream.Position = 100;
+        }
+
+        #endregion
+
+        #region Unsupported Operation Tests
+
+        [DataTestMethod]
+        [DataRow("Seek", DisplayName = "Seek Operation")]
+        [DataRow("SetLength", DisplayName = "SetLength Operation")]
+        [DataRow("Write", DisplayName = "Write Operation")]
+        public void UnsupportedOperations_ThrowNotSupportedException(string operation)
+        {
+            // Arrange
+            var stream = CreateStream();
+            var buffer = new byte[MEDIUM_OBJECT_SIZE];
+
+            // Act & Assert
+            Assert.ThrowsException<NotSupportedException>(() =>
+            {
+                switch (operation)
+                {
+                    case "Seek":
+                        stream.Seek(0, SeekOrigin.Begin);
+                        break;
+                    case "SetLength":
+                        stream.SetLength(MEDIUM_OBJECT_SIZE);
+                        break;
+                    case "Write":
+                        stream.Write(buffer, 0, buffer.Length);
+                        break;
+                }
+            });
+        }
+
+        #endregion
+
+        #region Flush Tests
+
+        [TestMethod]
+        public void Flush_DoesNotThrow()
+        {
+            // Arrange
+            var stream = CreateStream();
+
+            // Act
+            stream.Flush(); // Should not throw
+
+            // Assert - no exception
+        }
+
+        [TestMethod]
+        public async Task FlushAsync_Completes()
+        {
+            // Arrange
+            var stream = CreateStream();
+
+            // Act
+            await stream.FlushAsync(CancellationToken.None); // Should complete without error
+
+            // Assert - no exception
+        }
+
+        #endregion
+
+        #region MaxInMemoryParts Tests
+
+        [TestMethod]
+        public void Create_UsesRequestMaxInMemoryParts_NotConfig()
+        {
+            // Arrange
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client();
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            request.MaxInMemoryParts = 256; // Set custom value on request
+
+            var transferConfig = new TransferUtilityConfig
+            {
+                ConcurrentServiceRequests = 20 // TransferUtilityConfig no longer has MaxInMemoryParts
+            };
+
+            // Act
+            var stream = BufferedMultipartStream.Create(mockClient.Object, request, transferConfig);
+
+            // Assert
+            Assert.IsNotNull(stream);
+            // Verify the stream was created successfully with request's MaxInMemoryParts
+        }
+
+        [TestMethod]
+        public async Task Create_WithCustomMaxInMemoryParts_FlowsToConfiguration()
+        {
+            // Arrange
+            var customMaxParts = 512;
+            var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse(1024);
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(
+                (req, ct) => Task.FromResult(mockResponse));
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            request.MaxInMemoryParts = customMaxParts;
+
+            var transferConfig = new TransferUtilityConfig();
+            var stream = BufferedMultipartStream.Create(mockClient.Object, request, transferConfig);
+
+            await stream.InitializeAsync(CancellationToken.None);
+
+            // Act - Read from stream to verify it works with custom MaxInMemoryParts
+            var buffer = new byte[512];
+            var bytesRead = await stream.ReadAsync(buffer, 0, buffer.Length);
+
+            // Assert
+            Assert.IsTrue(bytesRead > 0, "Should successfully read with custom MaxInMemoryParts");
+
+            // Cleanup
+            stream.Dispose();
+        }
+
+        [TestMethod]
+        public async Task Create_WithDefaultMaxInMemoryParts_UsesRequestDefault()
+        {
+            // Arrange - Don't set MaxInMemoryParts explicitly, should use request's default (1024)
+            var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse(1024);
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(
+                (req, ct) => Task.FromResult(mockResponse));
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            // Don't set request.MaxInMemoryParts - should default to 1024
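+            // For comparison, the preceding test assigns the property explicitly
+            // (request.MaxInMemoryParts = 512); here the request-level default of
+            // 1024, per the comment above, should be picked up with no assignment.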
var transferConfig = new TransferUtilityConfig(); + var stream = BufferedMultipartStream.Create(mockClient.Object, request, transferConfig); + + await stream.InitializeAsync(CancellationToken.None); + + // Act + var buffer = new byte[512]; + var bytesRead = await stream.ReadAsync(buffer, 0, buffer.Length); + + // Assert + Assert.IsTrue(bytesRead > 0, "Should work with default MaxInMemoryParts from request"); + + // Cleanup + stream.Dispose(); + } + + [DataTestMethod] + [DataRow(1, DisplayName = "Minimum MaxInMemoryParts (1)")] + [DataRow(10, DisplayName = "Small MaxInMemoryParts (10)")] + [DataRow(512, DisplayName = "Medium MaxInMemoryParts (512)")] + [DataRow(2048, DisplayName = "Large MaxInMemoryParts (2048)")] + public async Task Create_WithVariousMaxInMemoryParts_WorksCorrectly(int maxInMemoryParts) + { + // Arrange + var totalParts = 5; + var partSize = 8 * 1024 * 1024; + var totalObjectSize = totalParts * partSize; + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( + totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + request.MaxInMemoryParts = maxInMemoryParts; + + var transferConfig = new TransferUtilityConfig { ConcurrentServiceRequests = 2 }; + var stream = BufferedMultipartStream.Create(mockClient.Object, request, transferConfig); + + await stream.InitializeAsync(CancellationToken.None); + + // Act - Read some data + var buffer = new byte[1024]; + var bytesRead = await stream.ReadAsync(buffer, 0, buffer.Length); + + // Assert + Assert.IsTrue(bytesRead > 0, + $"Should successfully process download with MaxInMemoryParts={maxInMemoryParts}"); + + // Cleanup + stream.Dispose(); + } + + #endregion + + #region Synchronous Read Tests + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public void Read_BeforeInitialize_ThrowsInvalidOperationException() + { + // Arrange + var stream = CreateStream(); + var buffer = new byte[MEDIUM_OBJECT_SIZE]; + + // Act + stream.Read(buffer, 0, buffer.Length); + } + + #endregion + + #region Disposal Tests + + [TestMethod] + public void Dispose_MultipleCalls_IsIdempotent() + { + // Arrange + var stream = CreateStream(); + + // Act + stream.Dispose(); + stream.Dispose(); // Second call should not throw + + // Assert - no exception + } + + [TestMethod] + public void Dispose_SuppressesExceptions() + { + // Arrange + _mockCoordinator.Setup(x => x.Dispose()).Throws(); + _mockBufferManager.Setup(x => x.Dispose()).Throws(); + + var stream = CreateStream(); + + // Act + stream.Dispose(); // Should not propagate exceptions + + // Assert - no exception thrown + } + + #endregion + + #region Stream Reading Behavior Tests - Empty Object + + [TestMethod] + public async Task ReadAsync_EmptyObject_ReturnsZero() + { + // Arrange - Empty object (0 bytes) + var testData = new byte[0]; + var mockResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse(0, null, null, "empty-etag", testData); + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client((req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var transferConfig = new TransferUtilityConfig(); + var stream = BufferedMultipartStream.Create(mockClient.Object, request, transferConfig); + + await stream.InitializeAsync(CancellationToken.None); + + // Act + var buffer = new byte[1024]; + var bytesRead = await stream.ReadAsync(buffer, 0, buffer.Length); + + // Assert + 
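+            // A zero-length object is an immediate end-of-stream: the very first
+            // ReadAsync call should return 0 without producing any part data.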
+            Assert.AreEqual(0, bytesRead, "Empty object should return 0 bytes");
+
+            // Cleanup
+            stream.Dispose();
+        }
+
+        #endregion
+
+        #region Stream Reading Behavior Tests - Multiple Consecutive Reads
+
+        [TestMethod]
+        public async Task ReadAsync_MultipleSmallReads_ReturnsAllData()
+        {
+            // Arrange - 1KB object, read in 256-byte chunks
+            var objectSize = 1024;
+            var chunkSize = 256;
+            var testData = MultipartDownloadTestHelpers.GenerateTestData(objectSize, 0);
+            var mockResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse(
+                objectSize, null, null, "test-etag", testData);
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(
+                (req, ct) => Task.FromResult(mockResponse));
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var transferConfig = new TransferUtilityConfig();
+            var stream = BufferedMultipartStream.Create(mockClient.Object, request, transferConfig);
+
+            await stream.InitializeAsync(CancellationToken.None);
+
+            // Act - Read in multiple small chunks
+            var allData = new System.Collections.Generic.List<byte>();
+            var buffer = new byte[chunkSize];
+            int bytesRead;
+
+            while ((bytesRead = await stream.ReadAsync(buffer, 0, buffer.Length)) > 0)
+            {
+                allData.AddRange(buffer.Take(bytesRead));
+            }
+
+            // Assert
+            Assert.AreEqual(objectSize, allData.Count, "Should read entire object");
+            Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch(testData, allData.ToArray(), 0, objectSize),
+                "Data should match original");
+
+            // Cleanup
+            stream.Dispose();
+        }
+
+        [TestMethod]
+        public async Task ReadAsync_VaryingBufferSizes_ReturnsCorrectData()
+        {
+            // Arrange - 2KB object
+            var objectSize = 2048;
+            var testData = MultipartDownloadTestHelpers.GenerateTestData(objectSize, 0);
+            var mockResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse(
+                objectSize, null, null, "test-etag", testData);
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(
+                (req, ct) => Task.FromResult(mockResponse));
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var transferConfig = new TransferUtilityConfig();
+            var stream = BufferedMultipartStream.Create(mockClient.Object, request, transferConfig);
+
+            await stream.InitializeAsync(CancellationToken.None);
+
+            // Act - Read with varying buffer sizes
+            var allData = new System.Collections.Generic.List<byte>();
+
+            // First read: 512 bytes
+            var buffer1 = new byte[512];
+            var read1 = await stream.ReadAsync(buffer1, 0, buffer1.Length);
+            allData.AddRange(buffer1.Take(read1));
+
+            // Second read: 1KB
+            var buffer2 = new byte[1024];
+            var read2 = await stream.ReadAsync(buffer2, 0, buffer2.Length);
+            allData.AddRange(buffer2.Take(read2));
+
+            // Third read: 256 bytes
+            var buffer3 = new byte[256];
+            var read3 = await stream.ReadAsync(buffer3, 0, buffer3.Length);
+            allData.AddRange(buffer3.Take(read3));
+
+            // Fourth read: Remaining data
+            var buffer4 = new byte[1024];
+            var read4 = await stream.ReadAsync(buffer4, 0, buffer4.Length);
+            allData.AddRange(buffer4.Take(read4));
+
+            // Assert
+            Assert.AreEqual(objectSize, allData.Count, "Should read entire object");
+            Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch(testData, allData.ToArray(), 0, objectSize),
+                "Data should match original");
+
+            // Cleanup
+            stream.Dispose();
+        }
+
+        #endregion
+
+        #region Stream Reading Behavior Tests - End of Stream
+
+        [TestMethod]
+        public async Task ReadAsync_PastEndOfStream_ReturnsZero()
+        {
+            // Arrange - Small object
+            var objectSize = 512;
+            var testData =
MultipartDownloadTestHelpers.GenerateTestData(objectSize, 0); + var mockResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse( + objectSize, null, null, "test-etag", testData); + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var transferConfig = new TransferUtilityConfig(); + var stream = BufferedMultipartStream.Create(mockClient.Object, request, transferConfig); + + await stream.InitializeAsync(CancellationToken.None); + + // Act - Read entire stream + var buffer = new byte[objectSize]; + var firstRead = await stream.ReadAsync(buffer, 0, buffer.Length); + + // Try to read again after reaching end + var secondRead = await stream.ReadAsync(buffer, 0, buffer.Length); + + // Assert + Assert.AreEqual(objectSize, firstRead, "First read should return all data"); + Assert.AreEqual(0, secondRead, "Reading past end should return 0"); + + // Cleanup + stream.Dispose(); + } + + [TestMethod] + public async Task ReadAsync_MultipleReadsAtEnd_ConsistentlyReturnsZero() + { + // Arrange + var objectSize = 256; + var testData = MultipartDownloadTestHelpers.GenerateTestData(objectSize, 0); + var mockResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse( + objectSize, null, null, "test-etag", testData); + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var transferConfig = new TransferUtilityConfig(); + var stream = BufferedMultipartStream.Create(mockClient.Object, request, transferConfig); + + await stream.InitializeAsync(CancellationToken.None); + + // Act - Read entire stream + var buffer = new byte[objectSize]; + await stream.ReadAsync(buffer, 0, buffer.Length); + + // Try multiple reads after end + var read1 = await stream.ReadAsync(buffer, 0, buffer.Length); + var read2 = await stream.ReadAsync(buffer, 0, buffer.Length); + var read3 = await stream.ReadAsync(buffer, 0, buffer.Length); + + // Assert + Assert.AreEqual(0, read1, "First read past end should return 0"); + Assert.AreEqual(0, read2, "Second read past end should return 0"); + Assert.AreEqual(0, read3, "Third read past end should return 0"); + + // Cleanup + stream.Dispose(); + } + + #endregion + + #region Stream Reading Behavior Tests - Buffer Sizes + + [TestMethod] + public async Task ReadAsync_BufferLargerThanData_ReturnsAvailableData() + { + // Arrange - Small object, large buffer + var objectSize = 512; + var bufferSize = 2048; // Buffer larger than data + var testData = MultipartDownloadTestHelpers.GenerateTestData(objectSize, 0); + var mockResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse( + objectSize, null, null, "test-etag", testData); + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var transferConfig = new TransferUtilityConfig(); + var stream = BufferedMultipartStream.Create(mockClient.Object, request, transferConfig); + + await stream.InitializeAsync(CancellationToken.None); + + // Act + var buffer = new byte[bufferSize]; + var bytesRead = await stream.ReadAsync(buffer, 0, buffer.Length); + + // Assert + Assert.AreEqual(objectSize, bytesRead, "Should return only available data, not buffer size"); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch(testData, 
buffer, 0, objectSize),
+                "Data should match original");
+
+            // Cleanup
+            stream.Dispose();
+        }
+
+        [TestMethod]
+        public async Task ReadAsync_SmallBuffer_RequiresMultipleReads()
+        {
+            // Arrange - Larger object, very small buffer
+            var objectSize = 4096;
+            var bufferSize = 128; // Very small buffer
+            var testData = MultipartDownloadTestHelpers.GenerateTestData(objectSize, 0);
+            var mockResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse(
+                objectSize, null, null, "test-etag", testData);
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(
+                (req, ct) => Task.FromResult(mockResponse));
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var transferConfig = new TransferUtilityConfig();
+            var stream = BufferedMultipartStream.Create(mockClient.Object, request, transferConfig);
+
+            await stream.InitializeAsync(CancellationToken.None);
+
+            // Act - Read entire object with small buffer
+            var allData = new System.Collections.Generic.List<byte>();
+            var buffer = new byte[bufferSize];
+            int bytesRead;
+            int readCount = 0;
+
+            while ((bytesRead = await stream.ReadAsync(buffer, 0, buffer.Length)) > 0)
+            {
+                allData.AddRange(buffer.Take(bytesRead));
+                readCount++;
+            }
+
+            // Assert
+            Assert.AreEqual(objectSize, allData.Count, "Should read entire object");
+            Assert.IsTrue(readCount >= objectSize / bufferSize,
+                "Should require multiple reads with small buffer");
+            Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch(testData, allData.ToArray(), 0, objectSize),
+                "Data should match original");
+
+            // Cleanup
+            stream.Dispose();
+        }
+
+        #endregion
+
+        #region Stream Reading Behavior Tests - Multipart Edge Cases
+
+        [TestMethod]
+        public async Task ReadAsync_ExactPartBoundary_ReadsCorrectly()
+        {
+            // Arrange - Object size exactly equals 2 parts
+            var partSize = 5 * 1024 * 1024; // 5MB
+            var totalParts = 2;
+            var objectSize = partSize * totalParts; // 10MB exactly
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart(
+                totalParts, partSize, objectSize, "boundary-etag", usePartStrategy: true);
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(partSize: partSize);
+            var transferConfig = new TransferUtilityConfig { ConcurrentServiceRequests = 1 };
+            var stream = BufferedMultipartStream.Create(mockClient.Object, request, transferConfig);
+
+            await stream.InitializeAsync(CancellationToken.None);
+
+            // Act - Read across part boundary
+            var bufferSize = (int)(partSize + 1024); // Read across boundary
+            var buffer = new byte[bufferSize];
+            var bytesRead = await stream.ReadAsync(buffer, 0, buffer.Length);
+
+            // Assert
+            Assert.IsTrue(bytesRead > 0, "Should successfully read across part boundary");
+            Assert.AreEqual(Math.Min(bufferSize, objectSize), bytesRead,
+                "Should read requested amount or remaining data");
+
+            // Cleanup
+            stream.Dispose();
+        }
+
+        [TestMethod]
+        public async Task ReadAsync_NonAlignedPartBoundary_ReadsCorrectly()
+        {
+            // Arrange - Object size not aligned to part boundaries
+            var partSize = 5 * 1024 * 1024; // 5MB
+            var remainder = 2 * 1024 * 1024; // 2MB
+            var objectSize = (2 * partSize) + remainder; // 12MB (2 full parts + 2MB)
+            var totalParts = 3;
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart(
+                totalParts, partSize, objectSize, "non-aligned-etag", usePartStrategy: true);
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(partSize: partSize);
+            var transferConfig = new TransferUtilityConfig { ConcurrentServiceRequests = 1 };
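+            // Part layout for this arrangement: 5 MB + 5 MB + 2 MB = 12 MB, so the
+            // final part is a short tail that does not line up with PartSize.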
+            var stream = BufferedMultipartStream.Create(mockClient.Object, request, transferConfig);
+
+            await stream.InitializeAsync(CancellationToken.None);
+
+            // Act - Read in chunks that don't align with part boundaries
+            var bufferSize = (int)(3 * 1024 * 1024); // 3MB chunks
+            var allData = new System.Collections.Generic.List<byte>();
+            var buffer = new byte[bufferSize];
+            int bytesRead;
+
+            while ((bytesRead = await stream.ReadAsync(buffer, 0, buffer.Length)) > 0)
+            {
+                allData.AddRange(buffer.Take(bytesRead));
+            }
+
+            // Assert
+            Assert.AreEqual(objectSize, allData.Count,
+                "Should read entire object despite non-aligned boundaries");
+
+            // Cleanup
+            stream.Dispose();
+        }
+
+        [TestMethod]
+        public async Task ReadAsync_BufferLargerThanPartSize_HandlesCorrectly()
+        {
+            // Arrange - Buffer larger than part size
+            var partSize = 5 * 1024 * 1024; // 5MB parts
+            var bufferSize = (int)(8 * 1024 * 1024); // 8MB buffer (larger than part)
+            var objectSize = 15 * 1024 * 1024; // 15MB total
+            var totalParts = 3;
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart(
+                totalParts, partSize, objectSize, "large-buffer-etag", usePartStrategy: true);
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(partSize: partSize);
+            var transferConfig = new TransferUtilityConfig { ConcurrentServiceRequests = 1 };
+            var stream = BufferedMultipartStream.Create(mockClient.Object, request, transferConfig);
+
+            await stream.InitializeAsync(CancellationToken.None);
+
+            // Act - Read with buffer larger than part size
+            var buffer = new byte[bufferSize];
+            var firstRead = await stream.ReadAsync(buffer, 0, buffer.Length);
+
+            // Assert
+            Assert.IsTrue(firstRead > 0, "Should successfully read with large buffer");
+            Assert.IsTrue(firstRead <= bufferSize, "Should not read more than buffer size");
+
+            // Cleanup
+            stream.Dispose();
+        }
+
+        #endregion
+
+        #region Stream Reading Behavior Tests - Partial Reads
+
+        [TestMethod]
+        public async Task ReadAsync_PartialBufferFill_ReturnsAvailableData()
+        {
+            // Arrange - Request more data than available
+            var objectSize = 1024;
+            var requestedSize = 2048; // Request more than available
+            var testData = MultipartDownloadTestHelpers.GenerateTestData(objectSize, 0);
+            var mockResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse(
+                objectSize, null, null, "test-etag", testData);
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(
+                (req, ct) => Task.FromResult(mockResponse));
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var transferConfig = new TransferUtilityConfig();
+            var stream = BufferedMultipartStream.Create(mockClient.Object, request, transferConfig);
+
+            await stream.InitializeAsync(CancellationToken.None);
+
+            // Act
+            var buffer = new byte[requestedSize];
+            var bytesRead = await stream.ReadAsync(buffer, 0, requestedSize);
+
+            // Assert
+            Assert.AreEqual(objectSize, bytesRead,
+                "Should return available data, not requested amount");
+            Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch(testData, buffer, 0, objectSize),
+                "Data should match original");
+
+            // Cleanup
+            stream.Dispose();
+        }
+
+        [TestMethod]
+        public async Task ReadAsync_ReadIntoMiddleOfBuffer_PositionsCorrectly()
+        {
+            // Arrange
+            var objectSize = 512;
+            var testData = MultipartDownloadTestHelpers.GenerateTestData(objectSize, 0);
+            var mockResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse(
+                objectSize, null, null, "test-etag", testData);
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(
+                (req, ct) => Task.FromResult(mockResponse));
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var transferConfig = new TransferUtilityConfig();
+            var stream = BufferedMultipartStream.Create(mockClient.Object, request, transferConfig);
+
+            await stream.InitializeAsync(CancellationToken.None);
+
+            // Act - Read into middle of buffer
+            var buffer = new byte[1024];
+            var offset = 256;
+            var count = 512;
+            var bytesRead = await stream.ReadAsync(buffer, offset, count);
+
+            // Assert
+            Assert.AreEqual(objectSize, bytesRead, "Should read available data");
+            Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch(testData, buffer, offset, objectSize),
+                "Data should be at correct offset in buffer");
+
+            // Cleanup
+            stream.Dispose();
+        }
+
+        #endregion
+    }
+}
diff --git a/sdk/test/Services/S3/UnitTests/Custom/BufferedPartDataHandlerTests.cs b/sdk/test/Services/S3/UnitTests/Custom/BufferedPartDataHandlerTests.cs
new file mode 100644
index 000000000000..645dae927051
--- /dev/null
+++ b/sdk/test/Services/S3/UnitTests/Custom/BufferedPartDataHandlerTests.cs
@@ -0,0 +1,969 @@
+using Amazon.S3.Model;
+using Amazon.S3.Transfer.Internal;
+using Microsoft.VisualStudio.TestTools.UnitTesting;
+using Moq;
+using System;
+using System.Buffers;
+using System.IO;
+using System.Collections.Generic;
+using System.Threading;
+using System.Threading.Tasks;
+
+namespace AWSSDK.UnitTests
+{
+    /// <summary>
+    /// Unit tests for BufferedPartDataHandler class.
+    /// Tests intelligent stream-vs-buffer decision making for multipart downloads.
+    /// </summary>
+    [TestClass]
+    public class BufferedPartDataHandlerTests
+    {
+        #region Constructor Tests
+
+        [TestMethod]
+        public void Constructor_WithValidParameters_CreatesHandler()
+        {
+            // Arrange
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var mockBufferManager = new Mock();
+
+            // Act
+            var handler = new BufferedPartDataHandler(mockBufferManager.Object, config);
+
+            // Assert
+            Assert.IsNotNull(handler);
+
+            // Cleanup
+            handler.Dispose();
+        }
+
+        [TestMethod]
+        [ExpectedException(typeof(ArgumentNullException))]
+        public void Constructor_WithNullBufferManager_ThrowsArgumentNullException()
+        {
+            // Arrange
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+
+            // Act
+            var handler = new BufferedPartDataHandler(null, config);
+
+            // Assert - ExpectedException
+        }
+
+        [TestMethod]
+        [ExpectedException(typeof(ArgumentNullException))]
+        public void Constructor_WithNullConfiguration_ThrowsArgumentNullException()
+        {
+            // Arrange
+            var mockBufferManager = new Mock();
+
+            // Act
+            var handler = new BufferedPartDataHandler(mockBufferManager.Object, null);
+
+            // Assert - ExpectedException
+        }
+
+        #endregion
+
+        #region ProcessPartAsync Tests - In-Order (Streaming Path)
+
+        [TestMethod]
+        public async Task ProcessPartAsync_InOrderPart_CreatesStreamingDataSource()
+        {
+            // Arrange
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var mockBufferManager = new Mock();
+            mockBufferManager.Setup(m => m.NextExpectedPartNumber).Returns(1);
+
+            IPartDataSource capturedDataSource = null;
+            mockBufferManager.Setup(m => m.AddBuffer(It.IsAny<IPartDataSource>()))
+                .Callback<IPartDataSource>((ds) => capturedDataSource = ds);
+
+            var handler = new BufferedPartDataHandler(mockBufferManager.Object, config);
+
+            try
+            {
+                var response = CreateMockGetObjectResponse(512);
+
+                // Act
+                await handler.ProcessPartAsync(1, response, CancellationToken.None);
+
+                // Assert
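+                // An in-order part should be surfaced as a StreamingDataSource, i.e.
+                // bytes flow straight from the response stream rather than being
+                // copied into a pooled buffer first (that is the out-of-order path).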
Assert.IsNotNull(capturedDataSource); + Assert.IsInstanceOfType(capturedDataSource, typeof(StreamingDataSource)); + Assert.AreEqual(1, capturedDataSource.PartNumber); + + // Cleanup + capturedDataSource?.Dispose(); + } + finally + { + handler.Dispose(); + } + } + + [TestMethod] + public async Task ProcessPartAsync_InOrderPart_ReleasesCapacityImmediately() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var mockBufferManager = new Mock<IPartBufferManager>(); + mockBufferManager.Setup(m => m.NextExpectedPartNumber).Returns(1); + mockBufferManager.Setup(m => m.AddBuffer(It.IsAny<IPartDataSource>())); + + var handler = new BufferedPartDataHandler(mockBufferManager.Object, config); + + try + { + var response = CreateMockGetObjectResponse(512); + + // Act + await handler.ProcessPartAsync(1, response, CancellationToken.None); + + // Assert - ReleaseBufferSpace should be called (through ReleaseCapacity) + // Handler calls ReleaseBufferSpace directly, which eventually calls the manager's method + // We verify that AddBuffer was called with a StreamingDataSource + mockBufferManager.Verify(m => m.AddBuffer( + It.Is<IPartDataSource>(ds => ds is StreamingDataSource)), Times.Once); + } + finally + { + handler.Dispose(); + } + } + + [TestMethod] + public async Task ProcessPartAsync_InOrderPart_DoesNotDisposeResponse() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var mockBufferManager = new Mock<IPartBufferManager>(); + mockBufferManager.Setup(m => m.NextExpectedPartNumber).Returns(1); + mockBufferManager.Setup(m => m.AddBuffer(It.IsAny<IPartDataSource>())); + + var handler = new BufferedPartDataHandler(mockBufferManager.Object, config); + + try + { + var response = CreateMockGetObjectResponse(512); + + // Act + await handler.ProcessPartAsync(1, response, CancellationToken.None); + + // Assert - Response stream should still be readable (not disposed) + // The StreamingDataSource now owns it and will dispose it later + Assert.IsTrue(response.ResponseStream.CanRead); + } + finally + { + handler.Dispose(); + } + } + + [TestMethod] + public async Task ProcessPartAsync_MultipleInOrderParts_AllStreamDirectly() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var mockBufferManager = new Mock<IPartBufferManager>(); + var streamingCount = 0; + + mockBufferManager.Setup(m => m.NextExpectedPartNumber) + .Returns(() => streamingCount + 1); + mockBufferManager.Setup(m => m.AddBuffer(It.IsAny<IPartDataSource>())) + .Callback<IPartDataSource>((ds) => + { + if (ds is StreamingDataSource) + streamingCount++; + }); + + var handler = new BufferedPartDataHandler(mockBufferManager.Object, config); + + try + { + // Act - Process parts 1, 2, 3 in order + await handler.ProcessPartAsync(1, CreateMockGetObjectResponse(512), CancellationToken.None); + await handler.ProcessPartAsync(2, CreateMockGetObjectResponse(512), CancellationToken.None); + await handler.ProcessPartAsync(3, CreateMockGetObjectResponse(512), CancellationToken.None); + + // Assert - All should be streaming + Assert.AreEqual(3, streamingCount); + } + finally + { + handler.Dispose(); + } + } + + #endregion + + #region ProcessPartAsync Tests - Out-of-Order (Buffering Path) + + [TestMethod] + public async Task ProcessPartAsync_OutOfOrderPart_BuffersToMemory() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var mockBufferManager = new Mock<IPartBufferManager>(); + mockBufferManager.Setup(m => m.NextExpectedPartNumber).Returns(1); + + StreamPartBuffer capturedBuffer = null; + mockBufferManager.Setup(m => 
m.AddBuffer(It.IsAny<StreamPartBuffer>())) + .Callback<StreamPartBuffer>((buffer) => capturedBuffer = buffer); + + var handler = new BufferedPartDataHandler(mockBufferManager.Object, config); + + try + { + var testData = MultipartDownloadTestHelpers.GenerateTestData(512, 0); + var response = CreateMockGetObjectResponse(512, testData); + + // Act - Process part 2 when expecting part 1 (out of order) + await handler.ProcessPartAsync(2, response, CancellationToken.None); + + // Assert + Assert.IsNotNull(capturedBuffer); + Assert.AreEqual(2, capturedBuffer.PartNumber); + Assert.AreEqual(512, capturedBuffer.Length); + + // Verify data was buffered correctly + byte[] bufferData = new byte[512]; + Buffer.BlockCopy(capturedBuffer.ArrayPoolBuffer, 0, bufferData, 0, 512); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch(testData, bufferData, 0, 512)); + } + finally + { + handler.Dispose(); + } + } + + [TestMethod] + public async Task ProcessPartAsync_OutOfOrderPart_DisposesResponse() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var mockBufferManager = new Mock<IPartBufferManager>(); + mockBufferManager.Setup(m => m.NextExpectedPartNumber).Returns(1); + mockBufferManager.Setup(m => m.AddBuffer(It.IsAny<StreamPartBuffer>())); + + var handler = new BufferedPartDataHandler(mockBufferManager.Object, config); + + try + { + var response = CreateMockGetObjectResponse(512); + + // Act - Process out of order part + await handler.ProcessPartAsync(3, response, CancellationToken.None); + + // Assert - Response should be disposed after buffering + // After disposal, stream is either null or no longer readable + Assert.IsTrue(response.ResponseStream == null || !response.ResponseStream.CanRead); + } + finally + { + handler.Dispose(); + } + } + + [TestMethod] + public async Task ProcessPartAsync_OutOfOrderPart_DoesNotReleaseCapacityImmediately() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var mockBufferManager = new Mock<IPartBufferManager>(); + mockBufferManager.Setup(m => m.NextExpectedPartNumber).Returns(1); + mockBufferManager.Setup(m => m.AddBuffer(It.IsAny<StreamPartBuffer>())); + + var handler = new BufferedPartDataHandler(mockBufferManager.Object, config); + + try + { + var response = CreateMockGetObjectResponse(512); + + // Act + await handler.ProcessPartAsync(2, response, CancellationToken.None); + + // Assert - AddBuffer should be called with StreamPartBuffer (not IPartDataSource) + mockBufferManager.Verify(m => m.AddBuffer( + It.IsAny<StreamPartBuffer>()), Times.Once); + + // Note: Capacity will be released later when the buffer is consumed by the reader + } + finally + { + handler.Dispose(); + } + } + + #endregion + + #region ProcessPartAsync Tests - Mixed Scenarios + + [TestMethod] + public async Task ProcessPartAsync_MixedInOrderAndOutOfOrder_HandlesCorrectly() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var mockBufferManager = new Mock<IPartBufferManager>(); + + var currentExpectedPart = 1; + mockBufferManager.Setup(m => m.NextExpectedPartNumber) + .Returns(() => currentExpectedPart); + + var streamingParts = 0; + var bufferedParts = 0; + + mockBufferManager.Setup(m => m.AddBuffer(It.IsAny<IPartDataSource>())) + .Callback<IPartDataSource>((ds) => + { + if (ds is StreamingDataSource) + { + streamingParts++; + currentExpectedPart++; + } + }); + + mockBufferManager.Setup(m => m.AddBuffer(It.IsAny<StreamPartBuffer>())) + .Callback<StreamPartBuffer>((buffer) => bufferedParts++); + + var handler = new BufferedPartDataHandler(mockBufferManager.Object, config); + + try + { + // Act - Mixed order: 1 (in), 3 (out), 2 (in after advance) + await 
handler.ProcessPartAsync(1, CreateMockGetObjectResponse(512), CancellationToken.None); + await handler.ProcessPartAsync(3, CreateMockGetObjectResponse(512), CancellationToken.None); + await handler.ProcessPartAsync(2, CreateMockGetObjectResponse(512), CancellationToken.None); + + // Assert + Assert.AreEqual(2, streamingParts); // Parts 1 and 2 streamed + Assert.AreEqual(1, bufferedParts); // Part 3 buffered + } + finally + { + handler.Dispose(); + } + } + + [TestMethod] + public async Task ProcessPartAsync_InOrderFollowedByOutOfOrder_HandlesCorrectly() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var mockBufferManager = new Mock<IPartBufferManager>(); + + mockBufferManager.SetupSequence(m => m.NextExpectedPartNumber) + .Returns(1) + .Returns(2); + + mockBufferManager.Setup(m => m.AddBuffer(It.IsAny<IPartDataSource>())); + mockBufferManager.Setup(m => m.AddBuffer(It.IsAny<StreamPartBuffer>())); + + var handler = new BufferedPartDataHandler(mockBufferManager.Object, config); + + try + { + // Act + await handler.ProcessPartAsync(1, CreateMockGetObjectResponse(512), CancellationToken.None); + await handler.ProcessPartAsync(3, CreateMockGetObjectResponse(512), CancellationToken.None); + + // Assert + mockBufferManager.Verify(m => m.AddBuffer( + It.Is<IPartDataSource>(ds => ds is StreamingDataSource && ds.PartNumber == 1)), Times.Once); + + mockBufferManager.Verify(m => m.AddBuffer( + It.Is<StreamPartBuffer>(b => b.PartNumber == 3)), Times.Once); + } + finally + { + handler.Dispose(); + } + } + + [TestMethod] + public async Task ProcessPartAsync_OutOfOrderFollowedByInOrder_HandlesCorrectly() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var mockBufferManager = new Mock<IPartBufferManager>(); + + // NextExpectedPartNumber is called multiple times per part, so provide enough values + // Part 2 (out of order): calls it twice, should return 1 both times + // Part 1 (in order): calls it twice, should return 1 both times + mockBufferManager.Setup(m => m.NextExpectedPartNumber).Returns(1); + + mockBufferManager.Setup(m => m.AddBuffer(It.IsAny<IPartDataSource>())); + mockBufferManager.Setup(m => m.AddBuffer(It.IsAny<StreamPartBuffer>())); + + var handler = new BufferedPartDataHandler(mockBufferManager.Object, config); + + try + { + // Act + await handler.ProcessPartAsync(2, CreateMockGetObjectResponse(512), CancellationToken.None); + await handler.ProcessPartAsync(1, CreateMockGetObjectResponse(512), CancellationToken.None); + + // Assert + mockBufferManager.Verify(m => m.AddBuffer( + It.Is<StreamPartBuffer>(b => b.PartNumber == 2)), Times.Once); + + mockBufferManager.Verify(m => m.AddBuffer( + It.Is<IPartDataSource>(ds => ds is StreamingDataSource && ds.PartNumber == 1)), Times.Once); + } + finally + { + handler.Dispose(); + } + } + + [TestMethod] + public async Task ProcessPartAsync_InOrderVsOutOfOrder_VerifyStreamingVsBufferingBehavior() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var mockBufferManager = new Mock<IPartBufferManager>(); + + // Track what types are added to verify memory allocation patterns + var streamingPartNumbers = new List<int>(); // Parts that stream (no ArrayPool allocation) + var bufferedPartNumbers = new List<int>(); // Parts that buffer (use ArrayPool) + + mockBufferManager.Setup(m => m.NextExpectedPartNumber).Returns(1); + + // Capture StreamingDataSource additions (streaming path - NO ArrayPool allocation) + mockBufferManager.Setup(m => m.AddBuffer( + It.IsAny<IPartDataSource>())) + .Callback<IPartDataSource>((ds) => + { + streamingPartNumbers.Add(ds.PartNumber); + }); + + // Capture StreamPartBuffer additions (buffering path - USES ArrayPool) + 
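// Because the streaming Setup above and the buffering Setup below use differently + // typed It.IsAny<T>() arguments, Moq binds them to different AddBuffer overloads; + // that overload split is what lets this test tell the two allocation paths apart. + 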
mockBufferManager.Setup(m => m.AddBuffer( + It.IsAny<StreamPartBuffer>())) + .Callback<StreamPartBuffer>((buffer) => + { + bufferedPartNumbers.Add(buffer.PartNumber); + }); + + var handler = new BufferedPartDataHandler(mockBufferManager.Object, config); + + try + { + // Act - Process part 1 (in order - should stream, no ArrayPool buffer) + await handler.ProcessPartAsync(1, CreateMockGetObjectResponse(512), CancellationToken.None); + + // Process part 3 (out of order - should buffer via ArrayPool) + await handler.ProcessPartAsync(3, CreateMockGetObjectResponse(512), CancellationToken.None); + + // Assert + // Part 1 should use streaming path (no ArrayPool allocation) + Assert.AreEqual(1, streamingPartNumbers.Count, "Expected exactly 1 part to stream"); + Assert.AreEqual(1, streamingPartNumbers[0], "Part 1 should stream directly"); + + // Part 3 should use buffering path (ArrayPool allocation) + Assert.AreEqual(1, bufferedPartNumbers.Count, "Expected exactly 1 part to be buffered"); + Assert.AreEqual(3, bufferedPartNumbers[0], "Part 3 should be buffered"); + } + finally + { + handler.Dispose(); + } + } + + [TestMethod] + public async Task ProcessPartAsync_AllInOrderParts_NoBufferingAllStreaming() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var mockBufferManager = new Mock<IPartBufferManager>(); + + var streamingPartNumbers = new List<int>(); + var bufferedPartNumbers = new List<int>(); + var currentExpectedPart = 1; + + mockBufferManager.Setup(m => m.NextExpectedPartNumber) + .Returns(() => currentExpectedPart); + + // Capture streaming additions + mockBufferManager.Setup(m => m.AddBuffer( + It.IsAny<IPartDataSource>())) + .Callback<IPartDataSource>((ds) => + { + streamingPartNumbers.Add(ds.PartNumber); + currentExpectedPart++; // Advance expected part after streaming + }); + + // Capture buffering additions + mockBufferManager.Setup(m => m.AddBuffer( + It.IsAny<StreamPartBuffer>())) + .Callback<StreamPartBuffer>((buffer) => + { + bufferedPartNumbers.Add(buffer.PartNumber); + }); + + var handler = new BufferedPartDataHandler(mockBufferManager.Object, config); + + try + { + // Act - Process 5 parts in perfect order + for (int i = 1; i <= 5; i++) + { + await handler.ProcessPartAsync(i, CreateMockGetObjectResponse(512), CancellationToken.None); + } + + // Assert - Best case scenario: all parts stream, zero buffering + Assert.AreEqual(5, streamingPartNumbers.Count, "All 5 parts should stream"); + Assert.AreEqual(0, bufferedPartNumbers.Count, "No parts should be buffered when all arrive in order"); + + // Verify parts streamed in correct order + for (int i = 0; i < 5; i++) + { + Assert.AreEqual(i + 1, streamingPartNumbers[i], + $"Part {i + 1} should have streamed in order"); + } + } + finally + { + handler.Dispose(); + } + } + + #endregion + + #region ProcessPartAsync Tests - Error Handling + + [TestMethod] + public async Task ProcessPartAsync_StreamingPathError_ReleasesCapacity() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var mockBufferManager = new Mock<IPartBufferManager>(); + mockBufferManager.Setup(m => m.NextExpectedPartNumber).Returns(1); + mockBufferManager.Setup(m => m.AddBuffer(It.IsAny<IPartDataSource>())) + .Throws(new InvalidOperationException("Test error")); + + var handler = new BufferedPartDataHandler(mockBufferManager.Object, config); + + try + { + var response = CreateMockGetObjectResponse(512); + + // Act & Assert + await Assert.ThrowsExceptionAsync<InvalidOperationException>(async () => + { + await handler.ProcessPartAsync(1, response, CancellationToken.None); + }); + + // Note: Handler's ReleaseCapacity is called on error, + // which eventually calls the 
manager's ReleaseBufferSpace + } + finally + { + handler.Dispose(); + } + } + + [TestMethod] + public async Task ProcessPartAsync_BufferingPathError_ReleasesCapacity() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var mockBufferManager = new Mock<IPartBufferManager>(); + mockBufferManager.Setup(m => m.NextExpectedPartNumber).Returns(1); + mockBufferManager.Setup(m => m.AddBuffer(It.IsAny<StreamPartBuffer>())) + .Throws(new InvalidOperationException("Test error")); + + var handler = new BufferedPartDataHandler(mockBufferManager.Object, config); + + try + { + var response = CreateMockGetObjectResponse(512); + + // Act & Assert + await Assert.ThrowsExceptionAsync<InvalidOperationException>(async () => + { + await handler.ProcessPartAsync(2, response, CancellationToken.None); + }); + + // Capacity should be released on error + } + finally + { + handler.Dispose(); + } + } + + [TestMethod] + public async Task ProcessPartAsync_BufferingReadError_DisposesResponseAndReleasesCapacity() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var mockBufferManager = new Mock<IPartBufferManager>(); + mockBufferManager.Setup(m => m.NextExpectedPartNumber).Returns(1); + + var handler = new BufferedPartDataHandler(mockBufferManager.Object, config); + + try + { + // Create response with faulty stream + var faultyStream = new FaultyStream(new IOException("Stream read error")); + var response = new GetObjectResponse + { + ContentLength = 512, + ResponseStream = faultyStream + }; + + // Act & Assert + await Assert.ThrowsExceptionAsync<IOException>(async () => + { + await handler.ProcessPartAsync(2, response, CancellationToken.None); + }); + } + finally + { + handler.Dispose(); + } + } + + #endregion + + #region WaitForCapacityAsync Tests + + [TestMethod] + public async Task WaitForCapacityAsync_DelegatesToBufferManager() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var mockBufferManager = new Mock<IPartBufferManager>(); + mockBufferManager.Setup(m => m.WaitForBufferSpaceAsync(It.IsAny<CancellationToken>())) + .Returns(Task.CompletedTask); + + var handler = new BufferedPartDataHandler(mockBufferManager.Object, config); + + try + { + // Act + await handler.WaitForCapacityAsync(CancellationToken.None); + + // Assert + mockBufferManager.Verify(m => m.WaitForBufferSpaceAsync( + It.IsAny<CancellationToken>()), Times.Once); + } + finally + { + handler.Dispose(); + } + } + + #endregion + + #region ReleaseCapacity Tests + + [TestMethod] + public void ReleaseCapacity_DelegatesToBufferManager() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var mockBufferManager = new Mock<IPartBufferManager>(); + mockBufferManager.Setup(m => m.ReleaseBufferSpace()); + + var handler = new BufferedPartDataHandler(mockBufferManager.Object, config); + + try + { + // Act + handler.ReleaseCapacity(); + + // Assert + mockBufferManager.Verify(m => m.ReleaseBufferSpace(), Times.Once); + } + finally + { + handler.Dispose(); + } + } + + #endregion + + #region OnDownloadComplete Tests + + [TestMethod] + public void OnDownloadComplete_DelegatesToBufferManager() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var mockBufferManager = new Mock<IPartBufferManager>(); + mockBufferManager.Setup(m => m.MarkDownloadComplete(It.IsAny<Exception>())); + + var handler = new BufferedPartDataHandler(mockBufferManager.Object, config); + + try + { + // Act + handler.OnDownloadComplete(null); + + // Assert + mockBufferManager.Verify(m => m.MarkDownloadComplete(null), Times.Once); + } + finally + { + 
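// Dispose after MarkDownloadComplete has been forwarded; repeated disposal is + // covered separately by Dispose_MultipleCalls_IsIdempotent in the Disposal Tests region below. + 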
handler.Dispose(); + } + } + + [TestMethod] + public void OnDownloadComplete_WithException_PassesExceptionToBufferManager() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var mockBufferManager = new Mock<IPartBufferManager>(); + var testException = new Exception("Download failed"); + mockBufferManager.Setup(m => m.MarkDownloadComplete(It.IsAny<Exception>())); + + var handler = new BufferedPartDataHandler(mockBufferManager.Object, config); + + try + { + // Act + handler.OnDownloadComplete(testException); + + // Assert + mockBufferManager.Verify(m => m.MarkDownloadComplete(testException), Times.Once); + } + finally + { + handler.Dispose(); + } + } + + #endregion + + #region Disposal Tests + + [TestMethod] + public void Dispose_MultipleCalls_IsIdempotent() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var mockBufferManager = new Mock<IPartBufferManager>(); + var handler = new BufferedPartDataHandler(mockBufferManager.Object, config); + + // Act - Dispose multiple times + handler.Dispose(); + handler.Dispose(); + handler.Dispose(); + + // Assert - Should not throw + } + + #endregion + + #region Semaphore Double Release Fix Tests + + [TestMethod] + public async Task ProcessPartAsync_StreamingPart_ReleasesCapacityOnlyOnce() + { + // This test verifies the fix for the double release bug in BufferedPartDataHandler. + // Before the fix: ProcessStreamingPart() called ReleaseBufferSpace() immediately after + // adding the StreamingDataSource, causing capacity to be released twice (once immediately, + // once later when the consumer finished reading the part). + // After the fix: The immediate ReleaseBufferSpace() call was removed. Capacity is released + // only once when the consumer finishes reading the part through PartBufferManager. + + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var mockBufferManager = new Mock<IPartBufferManager>(); + mockBufferManager.Setup(m => m.NextExpectedPartNumber).Returns(1); + + var releaseCount = 0; + mockBufferManager.Setup(m => m.ReleaseBufferSpace()) + .Callback(() => releaseCount++); + + mockBufferManager.Setup(m => m.AddBuffer(It.IsAny<IPartDataSource>())); + + var handler = new BufferedPartDataHandler(mockBufferManager.Object, config); + + try + { + var response = CreateMockGetObjectResponse(512); + + // Act - Process an in-order (streaming) part + await handler.ProcessPartAsync(1, response, CancellationToken.None); + + // Assert - ReleaseBufferSpace should NOT have been called during ProcessPartAsync + // (The removed code that called it immediately has been deleted) + // Capacity will be released later by PartBufferManager when consumer finishes reading + Assert.AreEqual(0, releaseCount, + "ProcessPartAsync should not release capacity for streaming parts. " + + "Capacity is released by PartBufferManager when consumer completes reading."); + + // Verify AddBuffer was called with StreamingDataSource (streaming path taken) + mockBufferManager.Verify(m => m.AddBuffer( + It.Is<IPartDataSource>(ds => ds is StreamingDataSource)), Times.Once); + } + finally + { + handler.Dispose(); + } + } + + [TestMethod] + public async Task ProcessPartAsync_BufferedPart_DoesNotReleaseCapacityImmediately() + { + // This test verifies that buffered (out-of-order) parts don't release capacity immediately. + // Capacity is released later by PartBufferManager when the consumer finishes reading the part. 
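// Taken together with the WaitForCapacityAsync and ReleaseCapacity tests above: capacity + // is acquired before a part is fetched and released only when the consumer drains the + // part, so neither ProcessPartAsync path is allowed to release it eagerly. + 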
+ + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var mockBufferManager = new Mock<IPartBufferManager>(); + mockBufferManager.Setup(m => m.NextExpectedPartNumber).Returns(1); + + var releaseCount = 0; + mockBufferManager.Setup(m => m.ReleaseBufferSpace()) + .Callback(() => releaseCount++); + + mockBufferManager.Setup(m => m.AddBuffer(It.IsAny<StreamPartBuffer>())); + + var handler = new BufferedPartDataHandler(mockBufferManager.Object, config); + + try + { + var response = CreateMockGetObjectResponse(512); + + // Act - Process an out-of-order (buffered) part + await handler.ProcessPartAsync(3, response, CancellationToken.None); + + // Assert - ReleaseBufferSpace should NOT have been called + // Capacity will be released later by PartBufferManager when consumer finishes reading + Assert.AreEqual(0, releaseCount, + "ProcessPartAsync should not release capacity for buffered parts. " + + "Capacity is released by PartBufferManager when consumer completes reading."); + + // Verify AddBuffer was called with StreamPartBuffer (buffering path taken) + mockBufferManager.Verify(m => m.AddBuffer( + It.IsAny<StreamPartBuffer>()), Times.Once); + } + finally + { + handler.Dispose(); + } + } + + [TestMethod] + public async Task ProcessPartAsync_StreamingPartError_DoesNotDoubleRelease() + { + // This test verifies that when an error occurs during streaming part processing, + // capacity is released correctly through ReleaseCapacity() without double-releasing. + + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var mockBufferManager = new Mock<IPartBufferManager>(); + mockBufferManager.Setup(m => m.NextExpectedPartNumber).Returns(1); + + var releaseCount = 0; + mockBufferManager.Setup(m => m.ReleaseBufferSpace()) + .Callback(() => releaseCount++); + + // Simulate error when adding buffer + mockBufferManager.Setup(m => m.AddBuffer(It.IsAny<IPartDataSource>())) + .Throws(new InvalidOperationException("Test error")); + + var handler = new BufferedPartDataHandler(mockBufferManager.Object, config); + + try + { + var response = CreateMockGetObjectResponse(512); + + // Act & Assert - Should throw + await Assert.ThrowsExceptionAsync<InvalidOperationException>(async () => + { + await handler.ProcessPartAsync(1, response, CancellationToken.None); + }); + + // Verify ReleaseBufferSpace was NOT called during error handling + // (The old double-release bug would have called it, causing issues) + Assert.AreEqual(0, releaseCount, + "Error handling should not release capacity for streaming parts. " + + "Streaming parts don't hold capacity slots in BufferedPartDataHandler."); + } + finally + { + handler.Dispose(); + } + } + + #endregion + + #region Helper Methods + + /// <summary> + /// Creates a mock GetObjectResponse with test data. + /// </summary> + private GetObjectResponse CreateMockGetObjectResponse(long contentLength, byte[] testData = null) + { + if (testData == null) + { + testData = MultipartDownloadTestHelpers.GenerateTestData((int)contentLength, 0); + } + + return new GetObjectResponse + { + ContentLength = contentLength, + ResponseStream = new MemoryStream(testData), + ETag = "test-etag" + }; + } + + /// <summary> + /// Stream that throws exceptions for testing error handling. 
+ /// </summary> + private class FaultyStream : Stream + { + private readonly Exception _exception; + + public FaultyStream(Exception exception) + { + _exception = exception; + } + + public override bool CanRead => true; + public override bool CanSeek => false; + public override bool CanWrite => false; + public override long Length => throw new NotSupportedException(); + public override long Position + { + get => throw new NotSupportedException(); + set => throw new NotSupportedException(); + } + + public override void Flush() { } + + public override int Read(byte[] buffer, int offset, int count) + { + throw _exception; + } + + public override Task<int> ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) + { + throw _exception; + } + + public override long Seek(long offset, SeekOrigin origin) + { + throw new NotSupportedException(); + } + + public override void SetLength(long value) + { + throw new NotSupportedException(); + } + + public override void Write(byte[] buffer, int offset, int count) + { + throw new NotSupportedException(); + } + } + + #endregion + } +} diff --git a/sdk/test/Services/S3/UnitTests/Custom/ContentRangeParserTests.cs b/sdk/test/Services/S3/UnitTests/Custom/ContentRangeParserTests.cs new file mode 100644 index 000000000000..a3896f08616a --- /dev/null +++ b/sdk/test/Services/S3/UnitTests/Custom/ContentRangeParserTests.cs @@ -0,0 +1,381 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Amazon.S3.Util; +using System; + +namespace AWSSDK.UnitTests +{ + [TestClass] + public class ContentRangeParserTests + { + #region Parse Method Tests + + [TestMethod] + [TestCategory("S3")] + public void Parse_ValidContentRange_ReturnsCorrectValues() + { + // Arrange + var contentRange = "bytes 0-5242879/52428800"; + + // Act + var (startByte, endByte, totalSize) = ContentRangeParser.Parse(contentRange); + + // Assert + Assert.AreEqual(0, startByte); + Assert.AreEqual(5242879, endByte); + Assert.AreEqual(52428800, totalSize); + } + + [TestMethod] + [TestCategory("S3")] + public void Parse_ContentRangeWithoutBytesPrefix_ReturnsCorrectValues() + { + // Arrange - After .Replace("bytes ", ""), format becomes just "0-1023/2048" + var contentRange = "0-1023/2048"; + + // Act + var (startByte, endByte, totalSize) = ContentRangeParser.Parse(contentRange); + + // Assert + Assert.AreEqual(0, startByte); + Assert.AreEqual(1023, endByte); + Assert.AreEqual(2048, totalSize); + } + + [TestMethod] + [TestCategory("S3")] + public void Parse_SingleByteRange_ReturnsCorrectValues() + { + // Arrange - Edge case: single byte + var contentRange = "bytes 0-0/1"; + + // Act + var (startByte, endByte, totalSize) = ContentRangeParser.Parse(contentRange); + + // Assert + Assert.AreEqual(0, startByte); + Assert.AreEqual(0, endByte); + Assert.AreEqual(1, totalSize); + } + + [TestMethod] + [TestCategory("S3")] + public void Parse_LastByteOfObject_ReturnsCorrectValues() + { + // Arrange - Edge case: last byte + var contentRange = "bytes 999-999/1000"; + + // Act + var (startByte, endByte, totalSize) = ContentRangeParser.Parse(contentRange); + + // Assert + Assert.AreEqual(999, startByte); + Assert.AreEqual(999, endByte); + Assert.AreEqual(1000, totalSize); + } + + [TestMethod] + [TestCategory("S3")] + public void Parse_MiddleRange_ReturnsCorrectValues() + { + // Arrange + var contentRange = "bytes 8388608-16777215/33554432"; + + // Act + var (startByte, endByte, totalSize) = ContentRangeParser.Parse(contentRange); + + // Assert + Assert.AreEqual(8388608, startByte); + Assert.AreEqual(16777215, endByte); + Assert.AreEqual(33554432, totalSize); + } + + [TestMethod] + [TestCategory("S3")] + public void Parse_LargeFileRange_ReturnsCorrectValues() + { + // Arrange - Test with large values (multi-GB file) + var contentRange = "bytes 5368709120-10737418239/53687091200"; // 50GB file + + // Act + var (startByte, endByte, totalSize) = ContentRangeParser.Parse(contentRange); + + // Assert + Assert.AreEqual(5368709120L, startByte); + Assert.AreEqual(10737418239L, endByte); + Assert.AreEqual(53687091200L, totalSize); + } + + [TestMethod] + [TestCategory("S3")] + [ExpectedException(typeof(InvalidOperationException))] + public void Parse_NullContentRange_ThrowsException() + { + // Act & Assert + ContentRangeParser.Parse(null); + } + + [TestMethod] + [TestCategory("S3")] + [ExpectedException(typeof(InvalidOperationException))] + public void Parse_EmptyContentRange_ThrowsException() + { + // Act & Assert + ContentRangeParser.Parse(string.Empty); + } + + [TestMethod] + [TestCategory("S3")] + [ExpectedException(typeof(InvalidOperationException))] + public void Parse_MissingSlash_ThrowsException() + { + // Arrange - Invalid format: missing slash separator + var 
contentRange = "bytes 0-1023"; + + // Act & Assert + ContentRangeParser.Parse(contentRange); + } + + [TestMethod] + [TestCategory("S3")] + [ExpectedException(typeof(InvalidOperationException))] + public void Parse_MissingDash_ThrowsException() + { + // Arrange - Invalid format: missing dash in range + var contentRange = "bytes 0 1023/2048"; + + // Act & Assert + ContentRangeParser.Parse(contentRange); + } + + [TestMethod] + [TestCategory("S3")] + [ExpectedException(typeof(InvalidOperationException))] + public void Parse_InvalidStartByte_ThrowsException() + { + // Arrange - Invalid: non-numeric start byte + var contentRange = "bytes abc-1023/2048"; + + // Act & Assert + ContentRangeParser.Parse(contentRange); + } + + [TestMethod] + [TestCategory("S3")] + [ExpectedException(typeof(InvalidOperationException))] + public void Parse_InvalidEndByte_ThrowsException() + { + // Arrange - Invalid: non-numeric end byte + var contentRange = "bytes 0-xyz/2048"; + + // Act & Assert + ContentRangeParser.Parse(contentRange); + } + + [TestMethod] + [TestCategory("S3")] + [ExpectedException(typeof(InvalidOperationException))] + public void Parse_InvalidTotalSize_ThrowsException() + { + // Arrange - Invalid: non-numeric total size + var contentRange = "bytes 0-1023/xyz"; + + // Act & Assert + ContentRangeParser.Parse(contentRange); + } + + [TestMethod] + [TestCategory("S3")] + [ExpectedException(typeof(InvalidOperationException))] + public void Parse_WildcardTotalSize_ThrowsException() + { + // Arrange - S3 should never return wildcard, but test handling + var contentRange = "bytes 0-1023/*"; + + // Act & Assert + ContentRangeParser.Parse(contentRange); + } + + [TestMethod] + [TestCategory("S3")] + [ExpectedException(typeof(InvalidOperationException))] + public void Parse_TooManySlashes_ThrowsException() + { + // Arrange - Invalid format: extra slashes + var contentRange = "bytes 0-1023/2048/extra"; + + // Act & Assert + ContentRangeParser.Parse(contentRange); + } + + [TestMethod] + [TestCategory("S3")] + [ExpectedException(typeof(InvalidOperationException))] + public void Parse_TooManyDashes_ThrowsException() + { + // Arrange - Invalid format: extra dashes + var contentRange = "bytes 0-512-1023/2048"; + + // Act & Assert + ContentRangeParser.Parse(contentRange); + } + + [TestMethod] + [TestCategory("S3")] + public void Parse_ExtraSpaces_ReturnsCorrectValues() + { + // Arrange - ContentRange with multiple spaces (should handle gracefully) + var contentRange = "bytes 0-1023/2048"; + + // Act + var (startByte, endByte, totalSize) = ContentRangeParser.Parse(contentRange); + + // Assert + Assert.AreEqual(0, startByte); + Assert.AreEqual(1023, endByte); + Assert.AreEqual(2048, totalSize); + } + + #endregion + + #region GetStartByte Method Tests + + [TestMethod] + [TestCategory("S3")] + public void GetStartByte_ValidContentRange_ReturnsStartByte() + { + // Arrange + var contentRange = "bytes 8388608-16777215/33554432"; + + // Act + var startByte = ContentRangeParser.GetStartByte(contentRange); + + // Assert + Assert.AreEqual(8388608, startByte); + } + + [TestMethod] + [TestCategory("S3")] + public void GetStartByte_ZeroStart_ReturnsZero() + { + // Arrange + var contentRange = "bytes 0-1023/2048"; + + // Act + var startByte = ContentRangeParser.GetStartByte(contentRange); + + // Assert + Assert.AreEqual(0, startByte); + } + + [TestMethod] + [TestCategory("S3")] + [ExpectedException(typeof(InvalidOperationException))] + public void GetStartByte_InvalidContentRange_ThrowsException() + { + // Arrange + var contentRange 
= "invalid"; + + // Act & Assert + ContentRangeParser.GetStartByte(contentRange); + } + + [TestMethod] + [TestCategory("S3")] + [ExpectedException(typeof(InvalidOperationException))] + public void GetStartByte_NullContentRange_ThrowsException() + { + // Act & Assert + ContentRangeParser.GetStartByte(null); + } + + #endregion + + #region GetTotalSize Method Tests + + [TestMethod] + [TestCategory("S3")] + public void GetTotalSize_ValidContentRange_ReturnsTotalSize() + { + // Arrange + var contentRange = "bytes 0-5242879/52428800"; + + // Act + var totalSize = ContentRangeParser.GetTotalSize(contentRange); + + // Assert + Assert.AreEqual(52428800, totalSize); + } + + [TestMethod] + [TestCategory("S3")] + public void GetTotalSize_SingleByte_ReturnsOne() + { + // Arrange + var contentRange = "bytes 0-0/1"; + + // Act + var totalSize = ContentRangeParser.GetTotalSize(contentRange); + + // Assert + Assert.AreEqual(1, totalSize); + } + + [TestMethod] + [TestCategory("S3")] + [ExpectedException(typeof(InvalidOperationException))] + public void GetTotalSize_InvalidContentRange_ThrowsException() + { + // Arrange + var contentRange = "invalid"; + + // Act & Assert + ContentRangeParser.GetTotalSize(contentRange); + } + + [TestMethod] + [TestCategory("S3")] + [ExpectedException(typeof(InvalidOperationException))] + public void GetTotalSize_NullContentRange_ThrowsException() + { + // Act & Assert + ContentRangeParser.GetTotalSize(null); + } + + [TestMethod] + [TestCategory("S3")] + public void GetTotalSize_LargeFile_ReturnsCorrectSize() + { + // Arrange - Test with very large file (>50GB) + var contentRange = "bytes 0-8388607/53687091200"; + + // Act + var totalSize = ContentRangeParser.GetTotalSize(contentRange); + + // Assert + Assert.AreEqual(53687091200L, totalSize); + } + + #endregion + } +} diff --git a/sdk/test/Services/S3/UnitTests/Custom/DownloadDirectoryCommandTests.cs b/sdk/test/Services/S3/UnitTests/Custom/DownloadDirectoryCommandTests.cs new file mode 100644 index 000000000000..900acf89d93b --- /dev/null +++ b/sdk/test/Services/S3/UnitTests/Custom/DownloadDirectoryCommandTests.cs @@ -0,0 +1,1186 @@ +using Amazon.S3; +using Amazon.S3.Model; +using Amazon.S3.Transfer; +using Amazon.S3.Transfer.Internal; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Moq; +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; + +namespace AWSSDK.UnitTests +{ + [TestClass] + public class DownloadDirectoryCommandTests + { + private string _testDirectory; + private Mock _mockS3Client; + private TransferUtilityConfig _config; + + [TestInitialize] + public void Setup() + { + _testDirectory = MultipartDownloadTestHelpers.CreateTempDirectory(); + _mockS3Client = new Mock(); + _config = new TransferUtilityConfig + { + ConcurrentServiceRequests = 4 + }; + + // Setup default S3 client config + var s3Config = new AmazonS3Config + { + BufferSize = 8192, + }; + _mockS3Client.Setup(c => c.Config).Returns(s3Config); + } + + [TestCleanup] + public void Cleanup() + { + MultipartDownloadTestHelpers.CleanupTempDirectory(_testDirectory); + } + + #region Constructor Tests + + [TestMethod] + public void Constructor_WithValidParameters_CreatesCommand() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + + // Act + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: false); + + // Assert + Assert.IsNotNull(command); + } + + [TestMethod] + public void 
Constructor_WithUseMultipartDownload_CreatesCommand() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + + // Act + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: true); + + // Assert + Assert.IsNotNull(command); + } + + [TestMethod] + public void Constructor_WithConfigAndMultipart_CreatesCommand() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + + // Act + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: true); + + // Assert + Assert.IsNotNull(command); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentNullException))] + public void Constructor_WithNullS3Client_ThrowsArgumentNullException() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + + // Act + var command = new DownloadDirectoryCommand(null, request, _config, useMultipartDownload: false); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentNullException))] + public void Constructor_WithNullRequest_ThrowsArgumentNullException() + { + // Act + var command = new DownloadDirectoryCommand(_mockS3Client.Object, null, _config, useMultipartDownload: false); + } + + #endregion + + #region ValidateRequest Tests + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public async Task ExecuteAsync_WithMissingBucketName_ThrowsInvalidOperationException() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + request.BucketName = null; + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: false); + + // Act + await command.ExecuteAsync(CancellationToken.None); + } + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public async Task ExecuteAsync_WithEmptyBucketName_ThrowsInvalidOperationException() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + request.BucketName = ""; + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: false); + + // Act + await command.ExecuteAsync(CancellationToken.None); + } + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public async Task ExecuteAsync_WithMissingS3Directory_ThrowsInvalidOperationException() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + request.S3Directory = null; + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: false); + + // Act + await command.ExecuteAsync(CancellationToken.None); + } + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public async Task ExecuteAsync_WithEmptyS3Directory_ThrowsInvalidOperationException() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + request.S3Directory = ""; + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: false); + + // Act + await command.ExecuteAsync(CancellationToken.None); + } + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public async Task ExecuteAsync_WithMissingLocalDirectory_ThrowsInvalidOperationException() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + request.LocalDirectory = null; + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: false); + + // Act + await command.ExecuteAsync(CancellationToken.None); + } + + #endregion + + #region ExecuteAsync Tests - Empty Directory + + 
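// The Concurrency Control tests at the end of this class verify that at most + // ConcurrentServiceRequests file downloads run at once. A minimal sketch of that kind + // of throttling, using a SemaphoreSlim (illustrative only: this helper is an assumption + // added for documentation, is not invoked by any test here, and is not the command's + // actual implementation): + private static async Task RunThrottledSketchAsync(IEnumerable<Func<Task>> downloads, int maxConcurrency) + { + using (var gate = new SemaphoreSlim(maxConcurrency, maxConcurrency)) + { + var tasks = new List<Task>(); + foreach (var download in downloads) + { + await gate.WaitAsync(); // block until one of the maxConcurrency slots frees up + tasks.Add(Task.Run(async () => + { + try { await download(); } // run a single file download + finally { gate.Release(); } // free the slot for the next file + })); + } + await Task.WhenAll(tasks); // semaphore is disposed only after all work completes + } + } + 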
[TestMethod] + public async Task ExecuteAsync_EmptyDirectory_ReturnsZeroObjectsDownloaded() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + SetupEmptyDirectoryListing(); + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: false); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual(0, response.ObjectsDownloaded); + } + + [TestMethod] + public async Task ExecuteAsync_EmptyDirectoryWithMultipart_ReturnsZeroObjectsDownloaded() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + SetupEmptyDirectoryListing(); + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: true); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual(0, response.ObjectsDownloaded); + } + + #endregion + + #region ExecuteAsync Tests - Single File + + [TestMethod] + public async Task ExecuteAsync_SingleFile_DownloadsSuccessfully() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + var fileSize = 1024; + SetupSingleFileDirectoryListing("test-file.txt", fileSize); + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: false); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual(1, response.ObjectsDownloaded); + + var downloadedFile = Path.Combine(_testDirectory, "test-file.txt"); + Assert.IsTrue(File.Exists(downloadedFile)); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyFileSize(downloadedFile, fileSize)); + } + + [TestMethod] + public async Task ExecuteAsync_SingleFileWithMultipart_DownloadsSuccessfully() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + var fileSize = 1024; + SetupSingleFileDirectoryListing("test-file.txt", fileSize, setupForMultipart: true); + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: true); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual(1, response.ObjectsDownloaded); + + var downloadedFile = Path.Combine(_testDirectory, "test-file.txt"); + Assert.IsTrue(File.Exists(downloadedFile)); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyFileSize(downloadedFile, fileSize)); + } + + #endregion + + #region ExecuteAsync Tests - Multiple Files + + [TestMethod] + public async Task ExecuteAsync_MultipleFiles_DownloadsAll() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + request.DownloadFilesConcurrently = false; // Sequential for predictable testing + + var files = new Dictionary<string, long> + { + { "file1.txt", 512 }, + { "file2.txt", 1024 }, + { "file3.txt", 2048 } + }; + + SetupMultipleFilesDirectoryListing(files); + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: false); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual(files.Count, response.ObjectsDownloaded); + + foreach (var file in files) + { + var downloadedFile = Path.Combine(_testDirectory, file.Key); + Assert.IsTrue(File.Exists(downloadedFile), $"File {file.Key} should exist"); + 
Assert.IsTrue(MultipartDownloadTestHelpers.VerifyFileSize(downloadedFile, file.Value), + $"File {file.Key} should have size {file.Value}"); + } + } + + [TestMethod] + public async Task ExecuteAsync_MultipleFilesWithMultipart_DownloadsAll() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + request.DownloadFilesConcurrently = false; // Sequential for predictable testing + + var files = new Dictionary<string, long> + { + { "large1.dat", 10 * 1024 * 1024 }, // 10MB + { "large2.dat", 15 * 1024 * 1024 } // 15MB + }; + + SetupMultipleFilesDirectoryListing(files, setupForMultipart: true); + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: true); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual(files.Count, response.ObjectsDownloaded); + } + + [TestMethod] + public async Task ExecuteAsync_MultipleFilesConcurrent_DownloadsAll() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + request.DownloadFilesConcurrently = true; // Concurrent downloads + + var files = new Dictionary<string, long> + { + { "file1.txt", 512 }, + { "file2.txt", 1024 }, + { "file3.txt", 2048 }, + { "file4.txt", 4096 } + }; + + SetupMultipleFilesDirectoryListing(files); + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: false); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual(files.Count, response.ObjectsDownloaded); + + foreach (var file in files) + { + var downloadedFile = Path.Combine(_testDirectory, file.Key); + Assert.IsTrue(File.Exists(downloadedFile), $"File {file.Key} should exist"); + } + } + + #endregion + + #region ExecuteAsync Tests - Nested Directories + + [TestMethod] + public async Task ExecuteAsync_NestedDirectories_CreatesStructure() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + request.DownloadFilesConcurrently = false; + + var files = new Dictionary<string, long> + { + { "level1/file1.txt", 512 }, + { "level1/level2/file2.txt", 1024 }, + { "level1/level2/level3/file3.txt", 2048 } + }; + + SetupMultipleFilesDirectoryListing(files); + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: false); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual(files.Count, response.ObjectsDownloaded); + + foreach (var file in files) + { + var downloadedFile = Path.Combine(_testDirectory, file.Key.Replace('/', Path.DirectorySeparatorChar)); + Assert.IsTrue(File.Exists(downloadedFile), $"File {file.Key} should exist at {downloadedFile}"); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyFileSize(downloadedFile, file.Value)); + } + } + + #endregion + + #region ExecuteAsync Tests - Cancellation + + [TestMethod] + public async Task ExecuteAsync_WithCancelledToken_ThrowsOperationCanceledException() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + SetupSingleFileDirectoryListing("test.txt", 1024); + + var cts = new CancellationTokenSource(); + cts.Cancel(); + + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: false); + + // Act & Assert + try + { + await command.ExecuteAsync(cts.Token); + Assert.Fail("Expected an OperationCanceledException to be thrown"); + } + catch (OperationCanceledException) + { + // 
Expected - TaskCanceledException inherits from OperationCanceledException + // This is the correct behavior + } + } + + [TestMethod] + public async Task ExecuteAsync_CancellationDuringDownload_CleansUpProperly() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + var files = new Dictionary<string, long> + { + { "file1.txt", 512 }, + { "file2.txt", 1024 } + }; + + var cts = new CancellationTokenSource(); + + // Setup to cancel after first file starts downloading + var callCount = 0; + _mockS3Client.Setup(c => c.ListObjectsAsync( + It.IsAny<ListObjectsRequest>(), + It.IsAny<CancellationToken>())) + .ReturnsAsync(() => CreateListObjectsResponse(files)); + + _mockS3Client.Setup(c => c.GetObjectAsync( + It.IsAny<GetObjectRequest>(), + It.IsAny<CancellationToken>())) + .Callback(() => + { + callCount++; + if (callCount == 1) + cts.Cancel(); + }) + .ThrowsAsync(new OperationCanceledException()); + + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: false); + + // Act + try + { + await command.ExecuteAsync(cts.Token); + } + catch (OperationCanceledException) + { + // Expected + } + + // Assert - partial files should be cleaned up + await Task.Delay(100); // Give cleanup time to complete + } + + #endregion + + #region ExecuteAsync Tests - Edge Cases + + [TestMethod] + public async Task ExecuteAsync_DirectoryMarkers_SkipsDirectoryObjects() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + + // Include directory markers (keys ending with /) + var listResponse = new ListObjectsResponse + { + S3Objects = new List<S3Object> + { + new S3Object { Key = "prefix/subdir/", Size = 0 }, + new S3Object { Key = "prefix/file.txt", Size = 1024 } + } + }; + + _mockS3Client.Setup(c => c.ListObjectsAsync( + It.IsAny<ListObjectsRequest>(), + It.IsAny<CancellationToken>())) + .ReturnsAsync(listResponse); + + SetupGetObjectForFile("prefix/file.txt", 1024); + + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: false); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.AreEqual(1, response.ObjectsDownloaded); // Only the file, not the directory marker + } + + [TestMethod] + public async Task ExecuteAsync_ExistingFiles_OverwritesCorrectly() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + var fileName = "existing-file.txt"; + var filePath = Path.Combine(_testDirectory, fileName); + + // Create existing file with old content + var oldData = MultipartDownloadTestHelpers.GenerateTestData(512, 999); + File.WriteAllBytes(filePath, oldData); + + var newFileSize = 1024; + SetupSingleFileDirectoryListing(fileName, newFileSize); + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: false); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.AreEqual(1, response.ObjectsDownloaded); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyFileSize(filePath, newFileSize)); + + // Verify content was overwritten + var newData = File.ReadAllBytes(filePath); + Assert.AreNotEqual(oldData.Length, newData.Length); + } + + #endregion + + #region Progress Tracking Tests + + [TestMethod] + public async Task ExecuteAsync_SingleFileMultipart_FiresProgressEvents() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + var progressEvents = new List<DownloadDirectoryProgressArgs>(); + + request.DownloadedDirectoryProgressEvent += (sender, args) => + { + progressEvents.Add(args); + }; + + var fileSize = 10 * 1024 * 1024; // 10MB + SetupSingleFileDirectoryListing("test.dat", fileSize, 
setupForMultipart: true); + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: true); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsTrue(progressEvents.Count > 0, "Should fire progress events"); + + // Verify final event + var finalEvent = progressEvents.Last(); + Assert.AreEqual(1, finalEvent.NumberOfFilesDownloaded, "Should have downloaded 1 file"); + Assert.AreEqual(1, finalEvent.TotalNumberOfFiles, "Should have 1 total file"); + Assert.AreEqual(fileSize, finalEvent.TransferredBytes, "All bytes should be transferred"); + Assert.AreEqual(fileSize, finalEvent.TotalBytes, "Total bytes should match file size"); + } + + [TestMethod] + public async Task ExecuteAsync_MultipleFilesMultipart_AggregatesProgressCorrectly() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + request.DownloadFilesConcurrently = false; // Sequential for predictable testing + + var progressEvents = new List<DownloadDirectoryProgressArgs>(); + request.DownloadedDirectoryProgressEvent += (sender, args) => + { + progressEvents.Add(args); + }; + + var files = new Dictionary<string, long> + { + { "file1.dat", 5 * 1024 * 1024 }, // 5MB + { "file2.dat", 10 * 1024 * 1024 } // 10MB + }; + + var totalBytes = files.Values.Sum(); + SetupMultipleFilesDirectoryListing(files, setupForMultipart: true); + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: true); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsTrue(progressEvents.Count > 0, "Should fire progress events"); + + var finalEvent = progressEvents.Last(); + Assert.AreEqual(2, finalEvent.NumberOfFilesDownloaded, "Should have downloaded 2 files"); + Assert.AreEqual(2, finalEvent.TotalNumberOfFiles, "Should have 2 total files"); + Assert.AreEqual(totalBytes, finalEvent.TransferredBytes, "All bytes should be transferred"); + Assert.AreEqual(totalBytes, finalEvent.TotalBytes, "Total bytes should match sum of all files"); + + // Verify progress increases monotonically + long lastTransferred = 0; + foreach (var evt in progressEvents) + { + Assert.IsTrue(evt.TransferredBytes >= lastTransferred, + "TransferredBytes should never decrease"); + lastTransferred = evt.TransferredBytes; + } + } + + [TestMethod] + public async Task ExecuteAsync_ConcurrentMultipart_FiresProgressCorrectly() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + request.DownloadFilesConcurrently = true; // Concurrent + + var progressEvents = new List<DownloadDirectoryProgressArgs>(); + var progressLock = new object(); + + request.DownloadedDirectoryProgressEvent += (sender, args) => + { + lock (progressLock) + { + progressEvents.Add(args); + } + }; + + var files = new Dictionary<string, long> + { + { "file1.dat", 8 * 1024 * 1024 }, // 8MB + { "file2.dat", 8 * 1024 * 1024 }, // 8MB + { "file3.dat", 8 * 1024 * 1024 } // 8MB + }; + + var totalBytes = files.Values.Sum(); + SetupMultipleFilesDirectoryListing(files, setupForMultipart: true); + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: true); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsTrue(progressEvents.Count > 0, "Should fire progress events"); + + // Verify monotonic increase in transferred bytes despite concurrent execution + long lastTransferred = 0; + foreach (var evt in progressEvents) + { + Assert.IsTrue(evt.TransferredBytes >= lastTransferred, + "TransferredBytes should 
never decrease even in concurrent mode"); + lastTransferred = evt.TransferredBytes; + } + + var finalEvent = progressEvents.Last(); + Assert.AreEqual(3, finalEvent.NumberOfFilesDownloaded, "Should have downloaded 3 files"); + Assert.AreEqual(totalBytes, finalEvent.TransferredBytes, "All bytes should be transferred"); + } + + [TestMethod] + public async Task ExecuteAsync_ConcurrentMode_OmitsCurrentFileDetails() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + request.DownloadFilesConcurrently = true; + + var progressEvents = new List<DownloadDirectoryProgressArgs>(); + var progressLock = new object(); + + request.DownloadedDirectoryProgressEvent += (sender, args) => + { + lock (progressLock) + { + progressEvents.Add(args); + } + }; + + SetupSingleFileDirectoryListing("test.dat", 8 * 1024 * 1024, setupForMultipart: true); + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: true); + + // Act + await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsTrue(progressEvents.Count > 0, "Should fire progress events"); + + // In concurrent mode, current file details should be null/zero + foreach (var evt in progressEvents) + { + Assert.IsNull(evt.CurrentFile, "CurrentFile should be null in concurrent mode"); + Assert.AreEqual(0, evt.TransferredBytesForCurrentFile, + "TransferredBytesForCurrentFile should be 0 in concurrent mode"); + Assert.AreEqual(0, evt.TotalNumberOfBytesForCurrentFile, + "TotalNumberOfBytesForCurrentFile should be 0 in concurrent mode"); + } + } + + [TestMethod] + public async Task ExecuteAsync_SequentialMode_IncludesCurrentFileDetails() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + request.DownloadFilesConcurrently = false; // Sequential + + var progressEvents = new List<DownloadDirectoryProgressArgs>(); + request.DownloadedDirectoryProgressEvent += (sender, args) => + { + progressEvents.Add(args); + }; + + var fileSize = 5 * 1024 * 1024; // 5MB + SetupSingleFileDirectoryListing("test-file.dat", fileSize, setupForMultipart: true); + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: true); + + // Act + await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsTrue(progressEvents.Count > 0, "Should fire progress events"); + + // In sequential mode, current file details should be populated + var eventsWithFile = progressEvents.Where(e => e.CurrentFile != null).ToList(); + Assert.IsTrue(eventsWithFile.Count > 0, "Should have events with CurrentFile populated"); + + foreach (var evt in eventsWithFile) + { + Assert.AreEqual("test-file.dat", evt.CurrentFile, "CurrentFile should be set"); + Assert.IsTrue(evt.TotalNumberOfBytesForCurrentFile > 0, + "TotalNumberOfBytesForCurrentFile should be greater than 0"); + } + + // Verify final event has complete file details + var finalEvent = progressEvents.Last(); + Assert.AreEqual("test-file.dat", finalEvent.CurrentFile); + Assert.AreEqual(fileSize, finalEvent.TotalNumberOfBytesForCurrentFile); + } + + [TestMethod] + public async Task ExecuteAsync_MultipleFilesSequential_TracksPerFileProgress() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + request.DownloadFilesConcurrently = false; + + var progressEvents = new List<DownloadDirectoryProgressArgs>(); + request.DownloadedDirectoryProgressEvent += (sender, args) => + { + progressEvents.Add(args); + }; + + var files = new Dictionary<string, long> + { + { "small.dat", 2 * 1024 * 1024 }, // 2MB + { "medium.dat", 5 * 1024 * 1024 }, // 5MB + { "large.dat", 10 * 1024 * 1024 } // 10MB + }; + + 
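// With three differently sized files downloaded sequentially, each file name should + // surface at least once as CurrentFile in the progress events asserted below. + 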
SetupMultipleFilesDirectoryListing(files, setupForMultipart: true);
+            var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: true);
+
+            // Act
+            await command.ExecuteAsync(CancellationToken.None);
+
+            // Assert
+            Assert.IsTrue(progressEvents.Count > 0, "Should fire progress events");
+
+            // Verify we see progress for each file
+            var filesTracked = progressEvents
+                .Where(e => e.CurrentFile != null)
+                .Select(e => e.CurrentFile)
+                .Distinct()
+                .ToList();
+
+            Assert.AreEqual(3, filesTracked.Count, "Should track progress for all 3 files");
+            Assert.IsTrue(filesTracked.Contains("small.dat"), "Should track small.dat");
+            Assert.IsTrue(filesTracked.Contains("medium.dat"), "Should track medium.dat");
+            Assert.IsTrue(filesTracked.Contains("large.dat"), "Should track large.dat");
+        }
+
+        [TestMethod]
+        public async Task ExecuteAsync_ProgressEventsCancellation_StopsProgressTracking()
+        {
+            // Arrange
+            var request = CreateDownloadDirectoryRequest();
+            request.DownloadFilesConcurrently = false;
+
+            var progressEvents = new List<DownloadDirectoryProgressArgs>();
+            var cts = new CancellationTokenSource();
+
+            request.DownloadedDirectoryProgressEvent += (sender, args) =>
+            {
+                progressEvents.Add(args);
+                // Cancel after first progress event
+                if (progressEvents.Count == 1)
+                {
+                    cts.Cancel();
+                }
+            };
+
+            var files = new Dictionary<string, long>
+            {
+                { "file1.dat", 5 * 1024 * 1024 },
+                { "file2.dat", 5 * 1024 * 1024 }
+            };
+
+            SetupMultipleFilesDirectoryListing(files, setupForMultipart: true);
+            var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, _config, useMultipartDownload: true);
+
+            // Act & Assert
+            try
+            {
+                await command.ExecuteAsync(cts.Token);
+            }
+            catch (OperationCanceledException)
+            {
+                // Expected
+            }
+
+            // Verify we got at least one progress event before cancellation
+            Assert.IsTrue(progressEvents.Count >= 1, "Should have fired at least one progress event");
+        }
+
+        #endregion
+
+        #region Concurrency Control Tests
+
+        /// <summary>
+        /// Tests that the ConcurrentServiceRequests setting actually limits concurrent file downloads.
+        /// This test will FAIL on the current broken implementation, demonstrating that
+        /// ConcurrentServiceRequests is not being respected.
+ /// + /// Expected: Max 2 concurrent downloads (ConcurrentServiceRequests = 2) + /// Actual (broken): 5 concurrent downloads (all files download simultaneously) + /// + [TestMethod] + public async Task ExecuteAsync_ConcurrentServiceRequests_RespectsLimit() + { + // Arrange + var request = CreateDownloadDirectoryRequest(); + request.DownloadFilesConcurrently = true; + + // Use a low limit to make violation obvious + var config = new TransferUtilityConfig + { + ConcurrentServiceRequests = 2 // Only 2 files should download simultaneously + }; + + // Track concurrent downloads using thread-safe counter + var currentConcurrentDownloads = 0; + var maxObservedConcurrency = 0; + var concurrencyLock = new object(); + + var files = new Dictionary + { + { "file1.dat", 5 * 1024 * 1024 }, // 5MB files + { "file2.dat", 5 * 1024 * 1024 }, + { "file3.dat", 5 * 1024 * 1024 }, + { "file4.dat", 5 * 1024 * 1024 }, + { "file5.dat", 5 * 1024 * 1024 } // 5 files total + }; + + // Setup directory listing + var listResponse = CreateListObjectsResponse(files); + _mockS3Client.Setup(c => c.ListObjectsAsync( + It.IsAny(), + It.IsAny())) + .ReturnsAsync(listResponse); + + // Override GetObjectAsync to track concurrency + _mockS3Client.Setup(c => c.GetObjectAsync( + It.IsAny(), + It.IsAny())) + .Returns(async (GetObjectRequest req, CancellationToken ct) => + { + // Increment counter when download starts + lock (concurrencyLock) + { + currentConcurrentDownloads++; + maxObservedConcurrency = Math.Max(maxObservedConcurrency, currentConcurrentDownloads); + Console.WriteLine($"Download started for {req.Key}. Current concurrent: {currentConcurrentDownloads}, Max observed: {maxObservedConcurrency}"); + } + + try + { + // Simulate some download time to ensure overlap + await Task.Delay(100, ct); + + // Return mock response + var fileName = req.Key.Split('/').Last(); + var fileSize = files[fileName]; + var data = MultipartDownloadTestHelpers.GenerateTestData((int)fileSize, 0); + + return new GetObjectResponse + { + BucketName = req.BucketName, + Key = req.Key, + ContentLength = fileSize, + ResponseStream = new MemoryStream(data), + ETag = "\"test-etag\"" + }; + } + finally + { + // Decrement counter when download completes + lock (concurrencyLock) + { + currentConcurrentDownloads--; + Console.WriteLine($"Download completed for {req.Key}. Current concurrent: {currentConcurrentDownloads}"); + } + } + }); + + var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, config, useMultipartDownload: false); + + // Act + await command.ExecuteAsync(CancellationToken.None); + + // Assert + Console.WriteLine($"Test Results: Expected max concurrency ≤ {config.ConcurrentServiceRequests}, Observed: {maxObservedConcurrency}"); + Assert.AreEqual(2, config.ConcurrentServiceRequests, "Test setup verification"); + Assert.IsTrue(maxObservedConcurrency <= config.ConcurrentServiceRequests, + $"Max concurrent downloads ({maxObservedConcurrency}) should not exceed ConcurrentServiceRequests ({config.ConcurrentServiceRequests})"); + } + + /// + /// Tests that sequential mode (DownloadFilesConcurrently = false) downloads only one file at a time. + /// This test will FAIL on the current broken implementation, demonstrating that + /// sequential mode is not working correctly. 
+        ///
+        /// Expected: Max 1 concurrent download (sequential mode)
+        /// Actual (broken): 3 concurrent downloads (all files download simultaneously despite sequential setting)
+        /// </summary>
+        [TestMethod]
+        public async Task ExecuteAsync_SequentialMode_DownloadsOneAtATime()
+        {
+            // Arrange
+            var request = CreateDownloadDirectoryRequest();
+            request.DownloadFilesConcurrently = false; // Sequential mode
+
+            var config = new TransferUtilityConfig
+            {
+                ConcurrentServiceRequests = 10 // High limit, but sequential should still be 1
+            };
+
+            // Track concurrent downloads
+            var currentConcurrentDownloads = 0;
+            var maxObservedConcurrency = 0;
+            var concurrencyLock = new object();
+
+            var files = new Dictionary<string, long>
+            {
+                { "file1.dat", 1024 },
+                { "file2.dat", 1024 },
+                { "file3.dat", 1024 }
+            };
+
+            // Setup directory listing
+            var listResponse = CreateListObjectsResponse(files);
+            _mockS3Client.Setup(c => c.ListObjectsAsync(
+                It.IsAny<ListObjectsRequest>(),
+                It.IsAny<CancellationToken>()))
+                .ReturnsAsync(listResponse);
+
+            // Override GetObjectAsync to track concurrency
+            _mockS3Client.Setup(c => c.GetObjectAsync(
+                It.IsAny<GetObjectRequest>(),
+                It.IsAny<CancellationToken>()))
+                .Returns(async (GetObjectRequest req, CancellationToken ct) =>
+                {
+                    lock (concurrencyLock)
+                    {
+                        currentConcurrentDownloads++;
+                        maxObservedConcurrency = Math.Max(maxObservedConcurrency, currentConcurrentDownloads);
+                        Console.WriteLine($"Sequential download started for {req.Key}. Current concurrent: {currentConcurrentDownloads}, Max observed: {maxObservedConcurrency}");
+                    }
+
+                    try
+                    {
+                        await Task.Delay(50, ct); // Brief delay
+
+                        var fileName = req.Key.Split('/').Last();
+                        var fileSize = files[fileName];
+                        var data = MultipartDownloadTestHelpers.GenerateTestData((int)fileSize, 0);
+
+                        return new GetObjectResponse
+                        {
+                            BucketName = req.BucketName,
+                            Key = req.Key,
+                            ContentLength = fileSize,
+                            ResponseStream = new MemoryStream(data),
+                            ETag = "\"test-etag\""
+                        };
+                    }
+                    finally
+                    {
+                        lock (concurrencyLock)
+                        {
+                            currentConcurrentDownloads--;
+                            Console.WriteLine($"Sequential download completed for {req.Key}. Current concurrent: {currentConcurrentDownloads}");
+                        }
+                    }
+                });
+
+            var command = new DownloadDirectoryCommand(_mockS3Client.Object, request, config, useMultipartDownload: false);
+
+            // Act
+            await command.ExecuteAsync(CancellationToken.None);
+
+            // Assert
+            Console.WriteLine($"Sequential Test Results: Expected max concurrency = 1, Observed: {maxObservedConcurrency}");
+            Assert.AreEqual(1, maxObservedConcurrency,
+                $"Sequential mode should only download 1 file at a time, but observed {maxObservedConcurrency}");
+        }
+
+        #endregion
+
+        #region Helper Methods
+
+        private TransferUtilityDownloadDirectoryRequest CreateDownloadDirectoryRequest(
+            string bucketName = "test-bucket",
+            string s3Directory = "prefix",
+            string localDirectory = null)
+        {
+            localDirectory = localDirectory ??
_testDirectory; + + return new TransferUtilityDownloadDirectoryRequest + { + BucketName = bucketName, + S3Directory = s3Directory, + LocalDirectory = localDirectory + }; + } + + private void SetupEmptyDirectoryListing() + { + var listResponse = new ListObjectsResponse + { + S3Objects = new List() + }; + + _mockS3Client.Setup(c => c.ListObjectsAsync( + It.IsAny(), + It.IsAny())) + .ReturnsAsync(listResponse); + } + + private void SetupSingleFileDirectoryListing(string fileName, long fileSize, bool setupForMultipart = false) + { + var files = new Dictionary { { fileName, fileSize } }; + SetupMultipleFilesDirectoryListing(files, setupForMultipart); + } + + private void SetupMultipleFilesDirectoryListing(Dictionary files, bool setupForMultipart = false) + { + var listResponse = CreateListObjectsResponse(files); + + _mockS3Client.Setup(c => c.ListObjectsAsync( + It.IsAny(), + It.IsAny())) + .ReturnsAsync(listResponse); + + // Setup GetObject for each file + foreach (var file in files) + { + SetupGetObjectForFile($"prefix/{file.Key}", file.Value, setupForMultipart); + } + } + + private ListObjectsResponse CreateListObjectsResponse(Dictionary files) + { + var s3Objects = files.Select(f => new S3Object + { + Key = $"prefix/{f.Key}", + Size = f.Value + }).ToList(); + + return new ListObjectsResponse + { + S3Objects = s3Objects + }; + } + + private void SetupGetObjectForFile(string key, long fileSize, bool setupForMultipart = false) + { + var data = MultipartDownloadTestHelpers.GenerateTestData((int)fileSize, 0); + + if (setupForMultipart) + { + // For multipart downloads using PART strategy, we need to: + // 1. First request (PartNumber=1) returns PartsCount > 1 + // 2. Subsequent requests for each part + + var partsCount = (int)Math.Ceiling((double)fileSize / (8 * 1024 * 1024)); // 8MB parts + if (partsCount < 2) partsCount = 2; // Force multipart for testing + + var partSize = fileSize / partsCount; + var lastPartSize = fileSize - (partSize * (partsCount - 1)); + + // Setup first part request (discovery) + var firstPartData = MultipartDownloadTestHelpers.GenerateTestData((int)partSize, 0); + var firstPartResponse = new GetObjectResponse + { + BucketName = "test-bucket", + Key = key, + ContentLength = partSize, + ResponseStream = new MemoryStream(firstPartData), + ContentRange = $"bytes 0-{partSize - 1}/{fileSize}", + ETag = "\"test-etag\"", + PartsCount = partsCount + }; + + _mockS3Client.Setup(c => c.GetObjectAsync( + It.Is(r => r.Key == key && r.PartNumber == 1), + It.IsAny())) + .ReturnsAsync(() => + { + // Return new stream each time to avoid disposed stream issues + var newData = MultipartDownloadTestHelpers.GenerateTestData((int)partSize, 0); + return new GetObjectResponse + { + BucketName = "test-bucket", + Key = key, + ContentLength = partSize, + ResponseStream = new MemoryStream(newData), + ContentRange = $"bytes 0-{partSize - 1}/{fileSize}", + ETag = "\"test-etag\"", + PartsCount = partsCount + }; + }); + + // Setup subsequent part requests + for (int i = 2; i <= partsCount; i++) + { + var partNum = i; + var currentPartSize = (partNum == partsCount) ? 
lastPartSize : partSize; + var startByte = (partNum - 1) * partSize; + var endByte = startByte + currentPartSize - 1; + + _mockS3Client.Setup(c => c.GetObjectAsync( + It.Is(r => r.Key == key && r.PartNumber == partNum), + It.IsAny())) + .ReturnsAsync(() => + { + var partData = MultipartDownloadTestHelpers.GenerateTestData((int)currentPartSize, (int)startByte); + return new GetObjectResponse + { + BucketName = "test-bucket", + Key = key, + ContentLength = currentPartSize, + ResponseStream = new MemoryStream(partData), + ContentRange = $"bytes {startByte}-{endByte}/{fileSize}", + ETag = "\"test-etag\"", + PartsCount = partsCount + }; + }); + } + } + else + { + // For non-multipart (simple) downloads + var response = new GetObjectResponse + { + BucketName = "test-bucket", + Key = key, + ContentLength = fileSize, + ResponseStream = new MemoryStream(data), + ETag = "\"test-etag\"" + }; + + _mockS3Client.Setup(c => c.GetObjectAsync( + It.Is(r => r.Key == key), + It.IsAny())) + .ReturnsAsync(() => + { + // Return new stream each time to avoid disposed stream issues + var newData = MultipartDownloadTestHelpers.GenerateTestData((int)fileSize, 0); + return new GetObjectResponse + { + BucketName = "test-bucket", + Key = key, + ContentLength = fileSize, + ResponseStream = new MemoryStream(newData), + ETag = "\"test-etag\"" + }; + }); + } + } + + #endregion + } +} diff --git a/sdk/test/Services/S3/UnitTests/Custom/EmbeddedResource/mapping.json b/sdk/test/Services/S3/UnitTests/Custom/EmbeddedResource/mapping.json index 224a0a35dfdb..6e34b8b1c2de 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/EmbeddedResource/mapping.json +++ b/sdk/test/Services/S3/UnitTests/Custom/EmbeddedResource/mapping.json @@ -43,6 +43,7 @@ }, "UploadResponse": { "PutObjectResponse": [ + "Bucket", "BucketKeyEnabled", "ChecksumCRC32", "ChecksumCRC32C", @@ -52,12 +53,15 @@ "ChecksumType", "ETag", "Expiration", + "Key", + "Location", "RequestCharged", "SSECustomerAlgorithm", "SSECustomerKeyMD5", "SSEKMSEncryptionContext", "SSEKMSKeyId", "ServerSideEncryption", + "Size", "VersionId" ] }, @@ -129,19 +133,43 @@ "Conversion": { "UploadRequest": { "PutObjectRequest": [ + "ACL", "Bucket", + "BucketKeyEnabled", + "CacheControl", "ChecksumAlgorithm", "ChecksumCRC32", "ChecksumCRC32C", "ChecksumCRC64NVME", "ChecksumSHA1", "ChecksumSHA256", + "ContentDisposition", + "ContentEncoding", + "ContentLanguage", + "ContentType", "ExpectedBucketOwner", + "Expires", + "GrantFullControl", + "GrantRead", + "GrantReadACP", + "GrantWriteACP", + "IfMatch", + "IfNoneMatch", "Key", + "Metadata", + "ObjectLockLegalHoldStatus", + "ObjectLockMode", + "ObjectLockRetainUntilDate", "RequestPayer", "SSECustomerAlgorithm", "SSECustomerKey", - "SSECustomerKeyMD5" + "SSECustomerKeyMD5", + "SSEKMSEncryptionContext", + "SSEKMSKeyId", + "ServerSideEncryption", + "StorageClass", + "Tagging", + "WebsiteRedirectLocation" ], "CreateMultipartRequest": [ "ACL", @@ -210,6 +238,7 @@ }, "CompleteMultipartResponse": { "UploadResponse": [ + "Bucket", "BucketKeyEnabled", "ChecksumCRC32", "ChecksumCRC32C", @@ -219,6 +248,8 @@ "ChecksumType", "ETag", "Expiration", + "Key", + "Location", "RequestCharged", "SSEKMSKeyId", "ServerSideEncryption", @@ -242,6 +273,7 @@ "SSEKMSEncryptionContext", "SSEKMSKeyId", "ServerSideEncryption", + "Size", "VersionId" ] }, diff --git a/sdk/test/Services/S3/UnitTests/Custom/EmbeddedResource/property-aliases.json b/sdk/test/Services/S3/UnitTests/Custom/EmbeddedResource/property-aliases.json index 650420c5d0d7..fbd061a7312e 100644 --- 
a/sdk/test/Services/S3/UnitTests/Custom/EmbeddedResource/property-aliases.json +++ b/sdk/test/Services/S3/UnitTests/Custom/EmbeddedResource/property-aliases.json @@ -8,6 +8,7 @@ "SSEKMSKeyId": "ServerSideEncryptionKeyManagementServiceKeyId" }, "TransferUtilityUploadResponse": { + "Bucket" : "BucketName", "ServerSideEncryption": "ServerSideEncryptionMethod", "SSECustomerAlgorithm": "ServerSideEncryptionCustomerMethod", "SSECustomerKeyMD5": "ServerSideEncryptionCustomerProvidedKeyMD5", @@ -127,6 +128,32 @@ }, "AbortMultipartUploadRequest": { "Bucket": "BucketName" + }, + "CompleteMultipartUploadResponse": { + "Bucket" : "BucketName", + "ServerSideEncryption": "ServerSideEncryptionMethod", + "SSEKMSKeyId": "ServerSideEncryptionKeyManagementServiceKeyId" + }, + "GetObjectResponse": { + "SSECustomerAlgorithm": "ServerSideEncryptionCustomerMethod", + "SSECustomerKeyMD5": "ServerSideEncryptionCustomerProvidedKeyMD5", + "SSEKMSKeyId": "ServerSideEncryptionKeyManagementServiceKeyId", + "ServerSideEncryption": "ServerSideEncryptionMethod", + "Restore": "RestoreExpiration" + }, + "TransferUtilityDownloadResponse": { + "SSECustomerAlgorithm": "ServerSideEncryptionCustomerMethod", + "SSECustomerKeyMD5": "ServerSideEncryptionCustomerProvidedKeyMD5", + "SSEKMSKeyId": "ServerSideEncryptionKeyManagementServiceKeyId", + "ServerSideEncryption": "ServerSideEncryptionMethod", + "Restore": "RestoreExpiration" + }, + "TransferUtilityOpenStreamResponse": { + "SSECustomerAlgorithm": "ServerSideEncryptionCustomerMethod", + "SSECustomerKeyMD5": "ServerSideEncryptionCustomerProvidedKeyMD5", + "SSEKMSKeyId": "ServerSideEncryptionKeyManagementServiceKeyId", + "ServerSideEncryption": "ServerSideEncryptionMethod", + "Restore": "RestoreExpiration" } } } \ No newline at end of file diff --git a/sdk/test/Services/S3/UnitTests/Custom/FailurePolicyTests.cs b/sdk/test/Services/S3/UnitTests/Custom/FailurePolicyTests.cs new file mode 100644 index 000000000000..4d270c1f12ff --- /dev/null +++ b/sdk/test/Services/S3/UnitTests/Custom/FailurePolicyTests.cs @@ -0,0 +1,1005 @@ +using Amazon.S3; +using Amazon.S3.Model; +using Amazon.S3.Transfer; +using Amazon.S3.Transfer.Internal; +using Amazon.Runtime; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Moq; +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Text; +using System.Threading; +using System.Threading.Tasks; + +namespace AWSSDK.UnitTests +{ + [TestClass] + public class FailurePolicyTests + { + private static TransferUtilityDownloadDirectoryRequest CreateRequest(string localDir, FailurePolicy policy) + { + return new TransferUtilityDownloadDirectoryRequest + { + BucketName = "test-bucket", + S3Directory = "prefix/", + LocalDirectory = localDir, + FailurePolicy = policy, + DownloadFilesConcurrently = true + }; + } + + private static TransferUtilityUploadDirectoryRequest CreateUploadRequest(string localDir, FailurePolicy policy) + { + return new TransferUtilityUploadDirectoryRequest + { + BucketName = "test-bucket", + Directory = localDir, + FailurePolicy = policy, + UploadFilesConcurrently = true + }; + } + + private static GetObjectResponse SuccessObject(string bucket, string key, string content = "data") + { + return new GetObjectResponse + { + BucketName = bucket, + Key = key, + ResponseStream = new MemoryStream(Encoding.UTF8.GetBytes(content)), + ContentLength = content.Length + }; + } + + private static Mock CreateMockS3(IEnumerable keys, Func shouldFail) + { + var mock = new Mock(); + + mock.Setup(m => 
m.Config).Returns(new AmazonS3Config()); + // ListObjectsAsync returns all objects in one page + mock.Setup(m => m.ListObjectsAsync(It.Is(r => r.BucketName == "test-bucket"), It.IsAny())) + .ReturnsAsync(new ListObjectsResponse + { + S3Objects = keys.Select(k => new S3Object { Key = k, Size = 4 }).ToList() + }); + + foreach (var key in keys) + { + if (shouldFail(key)) + { + mock.Setup(m => m.GetObjectAsync(It.Is(r => r.Key == key && r.BucketName == "test-bucket"), It.IsAny())) + .ThrowsAsync(new AmazonS3Exception("Simulated failure for " + key)); + } + else + { + mock.Setup(m => m.GetObjectAsync(It.Is(r => r.Key == key && r.BucketName == "test-bucket"), It.IsAny())) + .ReturnsAsync(SuccessObject("test-bucket", key)); + } + } + + mock.Setup(m => m.Dispose()); + return mock; + } + + private static Mock CreateMockS3ForUpload(IEnumerable keys, Func shouldFail) + { + var mock = new Mock(); + mock.Setup(m => m.Config).Returns(new AmazonS3Config()); + + foreach (var key in keys) + { + if (shouldFail(key)) + { + mock.Setup(m => m.PutObjectAsync(It.Is(r => r.Key == key && r.BucketName == "test-bucket"), It.IsAny())) + .ThrowsAsync(new AmazonS3Exception("Simulated failure for " + key)); + } + else + { + mock.Setup(m => m.PutObjectAsync(It.Is(r => r.Key == key && r.BucketName == "test-bucket"), It.IsAny())) + .ReturnsAsync(new PutObjectResponse()); + } + } + + mock.Setup(m => m.Dispose()); + return mock; + } + + private static string CreateTempDirectory() + { + string dir = Path.Combine(Path.GetTempPath(), "FailurePolicyTests", Guid.NewGuid().ToString()); + Directory.CreateDirectory(dir); + return dir; + } + + [TestMethod] + [TestCategory("S3")] + public async Task DownloadDirectory_ContinueOnFailure_PartialSuccess() + { + var keys = new[] { "prefix/file1.txt", "prefix/file2.txt", "prefix/file3.txt" }; + var mockS3 = CreateMockS3(keys, k => k.EndsWith("file2.txt", StringComparison.Ordinal)); + string localDir = CreateTempDirectory(); + try + { + var cancellationToken = new CancellationToken(); + var config = new TransferUtilityConfig(); + var tu = new TransferUtility(mockS3.Object); + var request = CreateRequest(localDir, FailurePolicy.ContinueOnFailure); + var command = new DownloadDirectoryCommand(mockS3.Object, request, config); + command.DownloadFilesConcurrently = request.DownloadFilesConcurrently; + var response = await command.ExecuteAsync(cancellationToken).ConfigureAwait(false); + + Assert.IsNotNull(response); + Assert.AreEqual(2, response.ObjectsDownloaded); + Assert.AreEqual(1, response.ObjectsFailed); + Assert.AreEqual(DirectoryResult.PartialSuccess, response.Result); + Assert.IsNotNull(response.Errors); + Assert.AreEqual(1, response.Errors.Count); + Assert.IsTrue(File.Exists(Path.Combine(localDir, "file1.txt"))); + Assert.IsTrue(File.Exists(Path.Combine(localDir, "file3.txt"))); + Assert.IsFalse(File.Exists(Path.Combine(localDir, "file2.txt"))); + } + finally + { + try { Directory.Delete(localDir, true); } catch { } + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task DownloadDirectory_ContinueOnFailure_AllFailures() + { + var keys = new[] { "prefix/fileA.txt", "prefix/fileB.txt" }; + var mockS3 = CreateMockS3(keys, k => true); + string localDir = CreateTempDirectory(); + try + { + var cancellationToken = new CancellationToken(); + var config = new TransferUtilityConfig(); + var tu = new TransferUtility(mockS3.Object); + var request = CreateRequest(localDir, FailurePolicy.ContinueOnFailure); + var command = new DownloadDirectoryCommand(mockS3.Object, request, config); + 
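+                // The command is constructed directly (instead of going through
+                // TransferUtility.DownloadDirectoryAsync) so the test can await
+                // ExecuteAsync and inspect the directory response counters.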
command.DownloadFilesConcurrently = request.DownloadFilesConcurrently;
+                var response = await command.ExecuteAsync(cancellationToken).ConfigureAwait(false);
+
+                Assert.IsNotNull(response);
+                Assert.AreEqual(0, response.ObjectsDownloaded);
+                Assert.AreEqual(2, response.ObjectsFailed);
+                Assert.AreEqual(DirectoryResult.Failure, response.Result);
+                Assert.IsNotNull(response.Errors);
+                Assert.AreEqual(2, response.Errors.Count);
+                Assert.AreEqual(0, Directory.GetFiles(localDir).Length);
+            }
+            finally
+            {
+                try { Directory.Delete(localDir, true); } catch { }
+            }
+        }
+
+        [TestMethod]
+        [TestCategory("S3")]
+        public async Task DownloadDirectory_ContinueOnFailure_AllSuccess()
+        {
+            var keys = new[] { "prefix/ok1.txt", "prefix/ok2.txt" };
+            var mockS3 = CreateMockS3(keys, k => false);
+            string localDir = CreateTempDirectory();
+            try
+            {
+                var cancellationToken = new CancellationToken();
+                var config = new TransferUtilityConfig();
+                var tu = new TransferUtility(mockS3.Object);
+                var request = CreateRequest(localDir, FailurePolicy.ContinueOnFailure);
+                var command = new DownloadDirectoryCommand(mockS3.Object, request, config);
+                command.DownloadFilesConcurrently = request.DownloadFilesConcurrently;
+                var response = await command.ExecuteAsync(cancellationToken).ConfigureAwait(false);
+
+                Assert.IsNotNull(response);
+                Assert.AreEqual(2, response.ObjectsDownloaded);
+                Assert.AreEqual(0, response.ObjectsFailed);
+                Assert.AreEqual(DirectoryResult.Success, response.Result);
+                Assert.IsTrue(File.Exists(Path.Combine(localDir, "ok1.txt")));
+                Assert.IsTrue(File.Exists(Path.Combine(localDir, "ok2.txt")));
+            }
+            finally
+            {
+                try { Directory.Delete(localDir, true); } catch { }
+            }
+        }
+
+        [TestMethod]
+        [TestCategory("S3")]
+        public async Task DownloadDirectory_AbortOnFailure_ThrowsOnFirstFailure()
+        {
+            var keys = new[] { "prefix/first.txt", "prefix/second.txt" };
+            var mockS3 = CreateMockS3(keys, k => k.EndsWith("second.txt", StringComparison.Ordinal));
+            string localDir = CreateTempDirectory();
+            try
+            {
+                var tu = new TransferUtility(mockS3.Object);
+                var request = CreateRequest(localDir, FailurePolicy.AbortOnFailure);
+
+                var ex = await Assert.ThrowsExceptionAsync<AmazonS3Exception>(() => tu.DownloadDirectoryAsync(request));
+                Assert.IsTrue(ex.Message.Contains("second.txt"));
+                // The first file may or may not have completed before the abort, so at most one file should exist on disk.
+                Assert.IsTrue(Directory.GetFiles(localDir).Length <= 1);
+            }
+            finally
+            {
+                try { Directory.Delete(localDir, true); } catch { }
+            }
+        }
+
+        [TestMethod]
+        [TestCategory("S3")]
+        public async Task DownloadDirectory_ObjectDownloadFailedEvent_CancelInHandler_ContinueOnFailure_Throws()
+        {
+            var keys = new[] { "prefix/file1.txt", "prefix/file2.txt", "prefix/file3.txt" };
+            var mockS3 = CreateMockS3(keys, k => k.EndsWith("file2.txt", StringComparison.Ordinal));
+            string localDir = CreateTempDirectory();
+            try
+            {
+                var tu = new TransferUtility(mockS3.Object);
+                var request = CreateRequest(localDir, FailurePolicy.ContinueOnFailure);
+                // Run the download sequentially so the behavior is deterministic for the test.
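+                // Throwing from the ObjectDownloadFailedEvent handler is expected to
+                // propagate to the caller even under ContinueOnFailure, giving the
+                // handler a way to abort the remaining transfers.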
+ request.DownloadFilesConcurrently = false; + + bool handlerInvoked = false; + request.ObjectDownloadFailedEvent += (sender, args) => + { + handlerInvoked = true; + throw new AmazonS3Exception("Stop processing immediately"); + }; + + var ex = await Assert.ThrowsExceptionAsync(() => tu.DownloadDirectoryAsync(request)); + Assert.IsTrue(ex.Message.Equals("Stop processing immediately")); + + Assert.IsTrue(handlerInvoked, "ObjectDownloadFailedEvent handler was not invoked."); + } + finally + { + try { Directory.Delete(localDir, true); } catch { } + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task DownloadDirectory_ObjectDownloadFailedEvent_CancelInHandler_AbortOnFailure_Throws() + { + var keys = new[] { "prefix/first.txt", "prefix/second.txt", "prefix/third.txt" }; + var mockS3 = CreateMockS3(keys, k => k.EndsWith("second.txt", StringComparison.Ordinal)); + string localDir = CreateTempDirectory(); + try + { + var tu = new TransferUtility(mockS3.Object); + var request = CreateRequest(localDir, FailurePolicy.AbortOnFailure); + request.DownloadFilesConcurrently = false; + + request.ObjectDownloadFailedEvent += (sender, args) => + { + throw new AmazonS3Exception("Stop processing immediately"); + }; + + var ex = await Assert.ThrowsExceptionAsync(() => tu.DownloadDirectoryAsync(request)); + Assert.IsTrue(ex.Message.Equals("Stop processing immediately")); + } + finally + { + try { Directory.Delete(localDir, true); } catch { } + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task DownloadDirectory_ObjectDownloadFailedEvent_ArgsContainExpectedData_ContinueOnFailure() + { + var keys = new[] { "prefix/a.txt", "prefix/b.txt" }; + var mockS3 = CreateMockS3(keys, k => k.EndsWith("b.txt", StringComparison.Ordinal)); + string localDir = CreateTempDirectory(); + try + { + var config = new TransferUtilityConfig(); + var request = CreateRequest(localDir, FailurePolicy.ContinueOnFailure); + // collect events + var captured = new List(); + var invoked = new ManualResetEventSlim(false); + request.ObjectDownloadFailedEvent += (sender, args) => + { + captured.Add(args); + invoked.Set(); + }; + + var command = new DownloadDirectoryCommand(mockS3.Object, request, config); + command.DownloadFilesConcurrently = request.DownloadFilesConcurrently; + var response = await command.ExecuteAsync(CancellationToken.None).ConfigureAwait(false); + + // wait briefly for any background event dispatch + invoked.Wait(1000); + + Assert.IsNotNull(response); + Assert.AreEqual(1, response.ObjectsFailed); + Assert.AreEqual(1, captured.Count); + + var evt = captured[0]; + Assert.AreSame(request, evt.DirectoryRequest); + Assert.IsNotNull(evt.ObjectRequest); + Assert.IsTrue(evt.ObjectRequest.Key.EndsWith("b.txt", StringComparison.Ordinal)); + Assert.IsNotNull(evt.Exception); + Assert.IsTrue(evt.Exception.Message.Contains("Simulated failure for")); + } + finally + { + try { Directory.Delete(localDir, true); } catch { } + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task DownloadDirectory_ObjectDownloadFailedEvent_ArgsContainExpectedData_AbortOnFailure() + { + var keys = new[] { "prefix/x.txt", "prefix/y.txt" }; + var mockS3 = CreateMockS3(keys, k => k.EndsWith("y.txt", StringComparison.Ordinal)); + string localDir = CreateTempDirectory(); + try + { + var request = CreateRequest(localDir, FailurePolicy.AbortOnFailure); + var captured = new List(); + var invoked = new ManualResetEventSlim(false); + + request.ObjectDownloadFailedEvent += (sender, args) => + { + captured.Add(args); + invoked.Set(); + 
}; + + var tu = new TransferUtility(mockS3.Object); + await Assert.ThrowsExceptionAsync(() => tu.DownloadDirectoryAsync(request)); + + // wait for event + invoked.Wait(1000); + + Assert.AreEqual(1, captured.Count); + var evt = captured[0]; + Assert.AreSame(request, evt.DirectoryRequest); + Assert.IsNotNull(evt.ObjectRequest); + Assert.IsTrue(evt.ObjectRequest.Key.EndsWith("y.txt", StringComparison.Ordinal)); + Assert.IsNotNull(evt.Exception); + Assert.IsTrue(evt.Exception.Message.Contains("Simulated failure for")); + } + finally + { + try { Directory.Delete(localDir, true); } catch { } + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task UploadDirectory_ContinueOnFailure_PartialSuccess() + { + var fileNames = new[] { "file1.txt", "file2.txt", "file3.txt" }; + string localDir = CreateTempDirectory(); + try + { + // create files + foreach (var f in fileNames) + { + File.WriteAllText(Path.Combine(localDir, f), "data"); + } + + var mockS3 = CreateMockS3ForUpload(fileNames, k => k.EndsWith("file2.txt", StringComparison.Ordinal)); + var cancellationToken = new CancellationToken(); + var config = new TransferUtilityConfig(); + var tu = new TransferUtility(mockS3.Object); + var request = CreateUploadRequest(localDir, FailurePolicy.ContinueOnFailure); + var command = new UploadDirectoryCommand(tu, config, request); + command.UploadFilesConcurrently = request.UploadFilesConcurrently; + var response = await command.ExecuteAsync(cancellationToken).ConfigureAwait(false); + + Assert.IsNotNull(response); + Assert.AreEqual(2, response.ObjectsUploaded); + Assert.AreEqual(1, response.ObjectsFailed); + Assert.AreEqual(DirectoryResult.PartialSuccess, response.Result); + Assert.IsNotNull(response.Errors); + Assert.AreEqual(1, response.Errors.Count); + // local files remain + Assert.IsTrue(File.Exists(Path.Combine(localDir, "file1.txt"))); + Assert.IsTrue(File.Exists(Path.Combine(localDir, "file3.txt"))); + Assert.IsTrue(File.Exists(Path.Combine(localDir, "file2.txt"))); + } + finally + { + try { Directory.Delete(localDir, true); } catch { } + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task UploadDirectory_ContinueOnFailure_AllFailures() + { + var fileNames = new[] { "fileA.txt", "fileB.txt" }; + string localDir = CreateTempDirectory(); + try + { + foreach (var f in fileNames) + File.WriteAllText(Path.Combine(localDir, f), "data"); + + var mockS3 = CreateMockS3ForUpload(fileNames, k => true); + var cancellationToken = new CancellationToken(); + var config = new TransferUtilityConfig(); + var tu = new TransferUtility(mockS3.Object); + var request = CreateUploadRequest(localDir, FailurePolicy.ContinueOnFailure); + var command = new UploadDirectoryCommand(tu, config, request); + command.UploadFilesConcurrently = request.UploadFilesConcurrently; + var response = await command.ExecuteAsync(cancellationToken).ConfigureAwait(false); + + Assert.IsNotNull(response); + Assert.AreEqual(0, response.ObjectsUploaded); + Assert.AreEqual(2, response.ObjectsFailed); + Assert.AreEqual(DirectoryResult.Failure, response.Result); + Assert.IsNotNull(response.Errors); + Assert.AreEqual(2, response.Errors.Count); + } + finally + { + try { Directory.Delete(localDir, true); } catch { } + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task UploadDirectory_ContinueOnFailure_AllSuccess() + { + var fileNames = new[] { "ok1.txt", "ok2.txt" }; + string localDir = CreateTempDirectory(); + try + { + foreach (var f in fileNames) + File.WriteAllText(Path.Combine(localDir, f), "data"); + + var mockS3 
= CreateMockS3ForUpload(fileNames, k => false); + var cancellationToken = new CancellationToken(); + var config = new TransferUtilityConfig(); + var tu = new TransferUtility(mockS3.Object); + var request = CreateUploadRequest(localDir, FailurePolicy.ContinueOnFailure); + var command = new UploadDirectoryCommand(tu, config, request); + command.UploadFilesConcurrently = request.UploadFilesConcurrently; + var response = await command.ExecuteAsync(cancellationToken).ConfigureAwait(false); + + Assert.IsNotNull(response); + Assert.AreEqual(2, response.ObjectsUploaded); + Assert.AreEqual(0, response.ObjectsFailed); + Assert.AreEqual(DirectoryResult.Success, response.Result); + } + finally + { + try { Directory.Delete(localDir, true); } catch { } + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task UploadDirectory_AbortOnFailure_ThrowsOnFirstFailure() + { + var fileNames = new[] { "first.txt", "second.txt" }; + string localDir = CreateTempDirectory(); + try + { + foreach (var f in fileNames) + File.WriteAllText(Path.Combine(localDir, f), "data"); + + var mockS3 = CreateMockS3ForUpload(fileNames, k => k.EndsWith("second.txt", StringComparison.Ordinal)); + var tu = new TransferUtility(mockS3.Object); + var request = CreateUploadRequest(localDir, FailurePolicy.AbortOnFailure); + + var ex = await Assert.ThrowsExceptionAsync(() => tu.UploadDirectoryAsync(request)); + Assert.IsTrue(ex.Message.Contains("second.txt")); + // first file may or may not have uploaded depending on timing; ensure at least one file attempt occurred + Assert.IsTrue(Directory.GetFiles(localDir).Length >= 1); + } + finally + { + try { Directory.Delete(localDir, true); } catch { } + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task UploadDirectory_ObjectUploadFailedEvent_CancelInHandler_ContinueOnFailure_Throws() + { + var fileNames = new[] { "file1.txt", "file2.txt", "file3.txt" }; + string localDir = CreateTempDirectory(); + try + { + foreach (var f in fileNames) + File.WriteAllText(Path.Combine(localDir, f), "data"); + + var mockS3 = CreateMockS3ForUpload(fileNames, k => k.EndsWith("file2.txt", StringComparison.Ordinal)); + var tu = new TransferUtility(mockS3.Object); + var request = CreateUploadRequest(localDir, FailurePolicy.ContinueOnFailure); + // Make sequential to make behavior deterministic for the test. 
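+                // As with the download variant above, an exception thrown from the
+                // ObjectUploadFailedEvent handler is expected to propagate even under
+                // ContinueOnFailure.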
+ request.UploadFilesConcurrently = false; + + bool handlerInvoked = false; + request.ObjectUploadFailedEvent += (sender, args) => + { + handlerInvoked = true; + throw new AmazonS3Exception("Stop processing immediately"); + }; + + var ex = await Assert.ThrowsExceptionAsync(() => tu.UploadDirectoryAsync(request)); + Assert.IsTrue(ex.Message.Equals("Stop processing immediately")); + + Assert.IsTrue(handlerInvoked, "ObjectUploadFailedEvent handler was not invoked."); + } + finally + { + try { Directory.Delete(localDir, true); } catch { } + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task UploadDirectory_ObjectUploadFailedEvent_ArgsContainExpectedData_ContinueOnFailure() + { + var fileNames = new[] { "a.txt", "b.txt" }; + string localDir = CreateTempDirectory(); + try + { + foreach (var f in fileNames) + File.WriteAllText(Path.Combine(localDir, f), "data"); + + var mockS3 = CreateMockS3ForUpload(new[] { "a.txt", "b.txt" }, k => k.EndsWith("b.txt", StringComparison.Ordinal)); + var config = new TransferUtilityConfig(); + var request = CreateUploadRequest(localDir, FailurePolicy.ContinueOnFailure); + // collect events + var captured = new List(); + var invoked = new ManualResetEventSlim(false); + request.ObjectUploadFailedEvent += (sender, args) => + { + captured.Add(args); + invoked.Set(); + }; + + var tu = new TransferUtility(mockS3.Object); + var command = new UploadDirectoryCommand(tu, config, request); + command.UploadFilesConcurrently = request.UploadFilesConcurrently; + var response = await command.ExecuteAsync(CancellationToken.None).ConfigureAwait(false); + + // wait briefly for any background event dispatch + invoked.Wait(1000); + + Assert.IsNotNull(response); + Assert.AreEqual(1, response.ObjectsFailed); + Assert.AreEqual(1, captured.Count); + + var evt = captured[0]; + Assert.AreSame(request, evt.DirectoryRequest); + Assert.IsNotNull(evt.ObjectRequest); + Assert.IsTrue(evt.ObjectRequest.Key.EndsWith("b.txt", StringComparison.Ordinal)); + Assert.IsNotNull(evt.Exception); + Assert.IsTrue(evt.Exception.Message.Contains("Simulated failure for")); + } + finally + { + try { Directory.Delete(localDir, true); } catch { } + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task UploadDirectory_ObjectUploadFailedEvent_ArgsContainExpectedData_AbortOnFailure() + { + var fileNames = new[] { "x.txt", "y.txt" }; + string localDir = CreateTempDirectory(); + try + { + foreach (var f in fileNames) + File.WriteAllText(Path.Combine(localDir, f), "data"); + + var mockS3 = CreateMockS3ForUpload(new[] { "x.txt", "y.txt" }, k => k.EndsWith("y.txt", StringComparison.Ordinal)); + var request = CreateUploadRequest(localDir, FailurePolicy.AbortOnFailure); + var captured = new List(); + var invoked = new ManualResetEventSlim(false); + + request.ObjectUploadFailedEvent += (sender, args) => + { + captured.Add(args); + invoked.Set(); + }; + + var tu = new TransferUtility(mockS3.Object); + await Assert.ThrowsExceptionAsync(() => tu.UploadDirectoryAsync(request)); + + // wait for event + invoked.Wait(1000); + + Assert.AreEqual(1, captured.Count); + var evt = captured[0]; + Assert.AreSame(request, evt.DirectoryRequest); + Assert.IsNotNull(evt.ObjectRequest); + Assert.IsTrue(evt.ObjectRequest.Key.EndsWith("y.txt", StringComparison.Ordinal)); + Assert.IsNotNull(evt.Exception); + Assert.IsTrue(evt.Exception.Message.Contains("Simulated failure for")); + } + finally + { + try { Directory.Delete(localDir, true); } catch { } + } + } + + #region Path Validation Failure Tests + + [TestMethod] + 
[TestCategory("S3")] + public async Task DownloadDirectory_PathTraversalAttack_ContinueOnFailure_SkipsInvalidPath() + { + // Test path traversal attack with ContinueOnFailure + // Malicious S3 key attempts to write outside target directory + var keys = new[] { + "prefix/valid1.txt", + "prefix/../../etc/passwd", // Path traversal attempt + "prefix/valid2.txt" + }; + var mockS3 = CreateMockS3(keys, k => false); // All downloads would succeed if allowed + string localDir = CreateTempDirectory(); + try + { + var config = new TransferUtilityConfig(); + var request = CreateRequest(localDir, FailurePolicy.ContinueOnFailure); + var captured = new List(); + + request.ObjectDownloadFailedEvent += (sender, args) => + { + captured.Add(args); + }; + + var command = new DownloadDirectoryCommand(mockS3.Object, request, config); + command.DownloadFilesConcurrently = request.DownloadFilesConcurrently; + var response = await command.ExecuteAsync(CancellationToken.None).ConfigureAwait(false); + + // Assert: Path validation failure should be counted, valid files downloaded + Assert.IsNotNull(response); + Assert.AreEqual(2, response.ObjectsDownloaded, "Should download 2 valid files"); + Assert.AreEqual(1, response.ObjectsFailed, "Should have 1 path validation failure"); + Assert.AreEqual(DirectoryResult.PartialSuccess, response.Result); + Assert.AreEqual(1, response.Errors.Count); + + // Verify the error is an AmazonClientException (path validation error) + Assert.IsInstanceOfType(response.Errors[0], typeof(AmazonClientException)); + Assert.IsTrue(response.Errors[0].Message.Contains("not allowed outside")); + + // Verify valid files were downloaded + Assert.IsTrue(File.Exists(Path.Combine(localDir, "valid1.txt"))); + Assert.IsTrue(File.Exists(Path.Combine(localDir, "valid2.txt"))); + + // Verify ObjectDownloadFailedEvent was raised for path validation failure + Assert.AreEqual(1, captured.Count); + Assert.IsInstanceOfType(captured[0].Exception, typeof(AmazonClientException)); + } + finally + { + try { Directory.Delete(localDir, true); } catch { } + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task DownloadDirectory_PathTraversalAttack_AbortOnFailure_ThrowsOnValidationFailure() + { + // Test path traversal attack with AbortOnFailure + var keys = new[] { + "prefix/file1.txt", + "prefix/../../../secrets.txt" // Path traversal attempt + }; + var mockS3 = CreateMockS3(keys, k => false); + string localDir = CreateTempDirectory(); + try + { + var tu = new TransferUtility(mockS3.Object); + var request = CreateRequest(localDir, FailurePolicy.AbortOnFailure); + + // Should throw on path validation failure + var ex = await Assert.ThrowsExceptionAsync( + () => tu.DownloadDirectoryAsync(request)); + Assert.IsTrue(ex.Message.Contains("not allowed outside")); + } + finally + { + try { Directory.Delete(localDir, true); } catch { } + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task DownloadDirectory_MixedValidationAndDownloadFailures_ContinueOnFailure_TracksAllFailures() + { + // Test mixed path validation failures + download failures + var keys = new[] { + "prefix/good.txt", // Should succeed + "prefix/../../bad-path.txt", // Path validation failure + "prefix/download-fail.txt", // Download failure + "prefix/another-good.txt" // Should succeed + }; + + var mockS3 = CreateMockS3(keys, k => k.EndsWith("download-fail.txt", StringComparison.Ordinal)); + string localDir = CreateTempDirectory(); + try + { + var config = new TransferUtilityConfig(); + var request = CreateRequest(localDir, 
FailurePolicy.ContinueOnFailure); + var captured = new List(); + + request.ObjectDownloadFailedEvent += (sender, args) => + { + captured.Add(args); + }; + + var command = new DownloadDirectoryCommand(mockS3.Object, request, config); + command.DownloadFilesConcurrently = request.DownloadFilesConcurrently; + var response = await command.ExecuteAsync(CancellationToken.None).ConfigureAwait(false); + + // Assert: Both failure types should be tracked + Assert.IsNotNull(response); + Assert.AreEqual(2, response.ObjectsDownloaded, "Should download 2 valid files"); + Assert.AreEqual(2, response.ObjectsFailed, "Should have 2 failures (1 validation + 1 download)"); + Assert.AreEqual(DirectoryResult.PartialSuccess, response.Result); + Assert.AreEqual(2, response.Errors.Count); + + // Verify both error types are present + var hasClientException = response.Errors.Any(e => e is AmazonClientException && e.Message.Contains("not allowed outside")); + var hasS3Exception = response.Errors.Any(e => e is AmazonS3Exception); + Assert.IsTrue(hasClientException, "Should have path validation error"); + Assert.IsTrue(hasS3Exception, "Should have download failure error"); + + // Verify events were raised for both failures + Assert.AreEqual(2, captured.Count); + } + finally + { + try { Directory.Delete(localDir, true); } catch { } + } + } + + #endregion + + #region Sequential Mode Tests + + [TestMethod] + [TestCategory("S3")] + public async Task DownloadDirectory_SequentialMode_MultipleFailures_ContinueOnFailure() + { + // Test sequential download mode with multiple failures + var keys = new[] { + "prefix/file1.txt", // Success + "prefix/file2.txt", // Failure + "prefix/file3.txt", // Success + "prefix/file4.txt", // Failure + "prefix/file5.txt" // Success + }; + var mockS3 = CreateMockS3(keys, k => k.Contains("file2") || k.Contains("file4")); + string localDir = CreateTempDirectory(); + try + { + var config = new TransferUtilityConfig(); + var request = CreateRequest(localDir, FailurePolicy.ContinueOnFailure); + request.DownloadFilesConcurrently = false; // Sequential mode + + var command = new DownloadDirectoryCommand(mockS3.Object, request, config); + command.DownloadFilesConcurrently = request.DownloadFilesConcurrently; + var response = await command.ExecuteAsync(CancellationToken.None).ConfigureAwait(false); + + Assert.IsNotNull(response); + Assert.AreEqual(3, response.ObjectsDownloaded, "Should download 3 files successfully"); + Assert.AreEqual(2, response.ObjectsFailed, "Should have 2 failures"); + Assert.AreEqual(DirectoryResult.PartialSuccess, response.Result); + + // Verify correct files were downloaded + Assert.IsTrue(File.Exists(Path.Combine(localDir, "file1.txt"))); + Assert.IsTrue(File.Exists(Path.Combine(localDir, "file3.txt"))); + Assert.IsTrue(File.Exists(Path.Combine(localDir, "file5.txt"))); + Assert.IsFalse(File.Exists(Path.Combine(localDir, "file2.txt"))); + Assert.IsFalse(File.Exists(Path.Combine(localDir, "file4.txt"))); + } + finally + { + try { Directory.Delete(localDir, true); } catch { } + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task DownloadDirectory_SequentialMode_FirstFileFailure_AbortOnFailure() + { + // Test AbortOnFailure in sequential mode when first file fails + var keys = new[] { + "prefix/fail-first.txt", + "prefix/should-not-download1.txt", + "prefix/should-not-download2.txt" + }; + var mockS3 = CreateMockS3(keys, k => k.Contains("fail-first")); + string localDir = CreateTempDirectory(); + try + { + var tu = new TransferUtility(mockS3.Object); + var request = 
CreateRequest(localDir, FailurePolicy.AbortOnFailure); + request.DownloadFilesConcurrently = false; // Sequential mode + + var ex = await Assert.ThrowsExceptionAsync( + () => tu.DownloadDirectoryAsync(request)); + Assert.IsTrue(ex.Message.Contains("fail-first")); + + // Should not have downloaded any other files + Assert.AreEqual(0, Directory.GetFiles(localDir).Length); + } + finally + { + try { Directory.Delete(localDir, true); } catch { } + } + } + + #endregion + + #region Concurrency Control Under Failure Tests + + [TestMethod] + [TestCategory("S3")] + public async Task DownloadDirectory_LimitedConcurrency_MultipleFailures_ContinueOnFailure() + { + // Test that failures are properly handled with limited concurrency + var keys = new[] { + "prefix/file1.txt", + "prefix/file2.txt", + "prefix/file3.txt", + "prefix/file4.txt", + "prefix/file5.txt", + "prefix/file6.txt" + }; + + // Make files 2, 4, and 6 fail + var mockS3 = CreateMockS3(keys, k => k.Contains("file2") || k.Contains("file4") || k.Contains("file6")); + string localDir = CreateTempDirectory(); + try + { + var config = new TransferUtilityConfig + { + ConcurrentServiceRequests = 2 // Limit to 2 concurrent downloads + }; + var request = CreateRequest(localDir, FailurePolicy.ContinueOnFailure); + + var command = new DownloadDirectoryCommand(mockS3.Object, request, config); + command.DownloadFilesConcurrently = request.DownloadFilesConcurrently; + var response = await command.ExecuteAsync(CancellationToken.None).ConfigureAwait(false); + + Assert.IsNotNull(response); + Assert.AreEqual(3, response.ObjectsDownloaded, "Should download 3 files successfully"); + Assert.AreEqual(3, response.ObjectsFailed, "Should have 3 failures"); + Assert.AreEqual(DirectoryResult.PartialSuccess, response.Result); + Assert.AreEqual(3, response.Errors.Count); + } + finally + { + try { Directory.Delete(localDir, true); } catch { } + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task DownloadDirectory_LimitedConcurrency_EarlyFailure_AbortOnFailure() + { + // Test that AbortOnFailure cancels pending tasks with limited concurrency + var keys = new[] { + "prefix/file1.txt", + "prefix/file2-fail.txt", // This will fail + "prefix/file3.txt", + "prefix/file4.txt", + "prefix/file5.txt" + }; + + var mockS3 = CreateMockS3(keys, k => k.Contains("file2-fail")); + string localDir = CreateTempDirectory(); + try + { + var config = new TransferUtilityConfig + { + ConcurrentServiceRequests = 2 + }; + var tu = new TransferUtility(mockS3.Object); + var request = CreateRequest(localDir, FailurePolicy.AbortOnFailure); + + var ex = await Assert.ThrowsExceptionAsync( + () => tu.DownloadDirectoryAsync(request)); + Assert.IsTrue(ex.Message.Contains("file2-fail")); + + // Some files may have downloaded before the failure, but not all + Assert.IsTrue(Directory.GetFiles(localDir).Length < keys.Length); + } + finally + { + try { Directory.Delete(localDir, true); } catch { } + } + } + + #endregion + + #region Validation Phase Failure with AbortOnFailure + + [TestMethod] + [TestCategory("S3")] + public async Task DownloadDirectory_ValidationPhaseFailure_AbortOnFailure_StopsImmediately() + { + // Test that AbortOnFailure stops on validation failure (before download phase) + var keys = new[] { + "prefix/file1.txt", + "prefix/../../../escape.txt", // Path validation will fail + "prefix/file2.txt" + }; + + var mockS3 = CreateMockS3(keys, k => false); + string localDir = CreateTempDirectory(); + try + { + var captured = new List(); + var tu = new TransferUtility(mockS3.Object); + 
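+                // Path validation runs in the pre-download phase, so AbortOnFailure is
+                // expected to surface the traversal error and the handler below should
+                // capture the validation exception.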
var request = CreateRequest(localDir, FailurePolicy.AbortOnFailure); + + request.ObjectDownloadFailedEvent += (sender, args) => + { + captured.Add(args); + }; + + var ex = await Assert.ThrowsExceptionAsync( + () => tu.DownloadDirectoryAsync(request)); + Assert.IsTrue(ex.Message.Contains("not allowed outside")); + + // Verify event was raised for validation failure + Assert.AreEqual(1, captured.Count); + Assert.IsInstanceOfType(captured[0].Exception, typeof(AmazonClientException)); + } + finally + { + try { Directory.Delete(localDir, true); } catch { } + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task DownloadDirectory_MultipleValidationFailures_ContinueOnFailure_SkipsAllInvalid() + { + // Test that multiple path validation failures are all handled correctly + var keys = new[] { + "prefix/good1.txt", + "prefix/../../bad1.txt", + "prefix/good2.txt", + "prefix/../../../bad2.txt", + "prefix/good3.txt", + "prefix/../../../../bad3.txt" + }; + + var mockS3 = CreateMockS3(keys, k => false); + string localDir = CreateTempDirectory(); + try + { + var config = new TransferUtilityConfig(); + var request = CreateRequest(localDir, FailurePolicy.ContinueOnFailure); + + var command = new DownloadDirectoryCommand(mockS3.Object, request, config); + command.DownloadFilesConcurrently = request.DownloadFilesConcurrently; + var response = await command.ExecuteAsync(CancellationToken.None).ConfigureAwait(false); + + Assert.IsNotNull(response); + Assert.AreEqual(3, response.ObjectsDownloaded, "Should download 3 valid files"); + Assert.AreEqual(3, response.ObjectsFailed, "Should have 3 path validation failures"); + Assert.AreEqual(DirectoryResult.PartialSuccess, response.Result); + + // All errors should be AmazonClientException + Assert.IsTrue(response.Errors.All(e => e is AmazonClientException)); + Assert.IsTrue(response.Errors.All(e => e.Message.Contains("not allowed outside"))); + } + finally + { + try { Directory.Delete(localDir, true); } catch { } + } + } + + #endregion + } +} diff --git a/sdk/test/Services/S3/UnitTests/Custom/FileDownloadConfigurationTests.cs b/sdk/test/Services/S3/UnitTests/Custom/FileDownloadConfigurationTests.cs new file mode 100644 index 000000000000..afc818d8906f --- /dev/null +++ b/sdk/test/Services/S3/UnitTests/Custom/FileDownloadConfigurationTests.cs @@ -0,0 +1,334 @@ +using Amazon.S3.Transfer.Internal; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using System; +using System.IO; + +namespace AWSSDK.UnitTests +{ + [TestClass] + public class FileDownloadConfigurationTests + { + #region Constructor Tests - Valid Parameters + + [TestMethod] + public void Constructor_WithValidParameters_CreatesConfiguration() + { + // Arrange + var concurrentRequests = 5; + var bufferSize = 8192; + var partSize = 8 * 1024 * 1024; + var destinationPath = "test.dat"; + + // Act + var config = new FileDownloadConfiguration( + concurrentRequests, + bufferSize, + partSize, + destinationPath); + + // Assert + Assert.IsNotNull(config); + } + + [TestMethod] + public void Constructor_SetsConcurrentServiceRequests() + { + // Arrange + var expectedConcurrentRequests = 10; + + // Act + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + concurrentRequests: expectedConcurrentRequests); + + // Assert + Assert.AreEqual(expectedConcurrentRequests, config.ConcurrentServiceRequests); + } + + [TestMethod] + public void Constructor_SetsTargetPartSizeBytes() + { + // Arrange + var expectedPartSize = 16 * 1024 * 1024; + + // Act + var config = 
MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + partSize: expectedPartSize); + + // Assert + Assert.AreEqual(expectedPartSize, config.TargetPartSizeBytes); + } + + [TestMethod] + public void Constructor_SetsBufferSize() + { + // Arrange + var expectedBufferSize = 16384; + + // Act + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + bufferSize: expectedBufferSize); + + // Assert + Assert.AreEqual(expectedBufferSize, config.BufferSize); + } + + [TestMethod] + public void Constructor_SetsDestinationFilePath() + { + // Arrange + var expectedPath = Path.Combine(Path.GetTempPath(), "test-file.dat"); + + // Act + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: expectedPath); + + // Assert + Assert.AreEqual(expectedPath, config.DestinationFilePath); + } + + #endregion + + #region Constructor Tests - Parameter Validation + + [TestMethod] + [ExpectedException(typeof(ArgumentOutOfRangeException))] + public void Constructor_WithZeroConcurrentServiceRequests_ThrowsArgumentOutOfRangeException() + { + // Act + var config = new FileDownloadConfiguration(0, 8192, 8 * 1024 * 1024, "test.dat"); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentOutOfRangeException))] + public void Constructor_WithNegativeConcurrentServiceRequests_ThrowsArgumentOutOfRangeException() + { + // Act + var config = new FileDownloadConfiguration(-1, 8192, 8 * 1024 * 1024, "test.dat"); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentOutOfRangeException))] + public void Constructor_WithZeroBufferSize_ThrowsArgumentOutOfRangeException() + { + // Act + var config = new FileDownloadConfiguration(10, 0, 8 * 1024 * 1024, "test.dat"); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentOutOfRangeException))] + public void Constructor_WithNegativeBufferSize_ThrowsArgumentOutOfRangeException() + { + // Act + var config = new FileDownloadConfiguration(10, -1, 8 * 1024 * 1024, "test.dat"); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentOutOfRangeException))] + public void Constructor_WithZeroTargetPartSizeBytes_ThrowsArgumentOutOfRangeException() + { + // Act + var config = new FileDownloadConfiguration(10, 8192, 0, "test.dat"); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentOutOfRangeException))] + public void Constructor_WithNegativeTargetPartSizeBytes_ThrowsArgumentOutOfRangeException() + { + // Act + var config = new FileDownloadConfiguration(10, 8192, -1, "test.dat"); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentException))] + public void Constructor_WithNullDestinationFilePath_ThrowsArgumentException() + { + // Act + var config = new FileDownloadConfiguration(10, 8192, 8 * 1024 * 1024, null); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentException))] + public void Constructor_WithEmptyDestinationFilePath_ThrowsArgumentException() + { + // Act + var config = new FileDownloadConfiguration(10, 8192, 8 * 1024 * 1024, ""); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentException))] + public void Constructor_WithWhitespaceDestinationFilePath_ThrowsArgumentException() + { + // Act + var config = new FileDownloadConfiguration(10, 8192, 8 * 1024 * 1024, " "); + } + + #endregion + + #region Property Tests + + [TestMethod] + public void BufferSize_PropertyGetter_ReturnsCorrectValue() + { + // Arrange + var expectedBufferSize = 16384; + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + bufferSize: expectedBufferSize); + + // Act + var actualBufferSize = 
config.BufferSize; + + // Assert + Assert.AreEqual(expectedBufferSize, actualBufferSize); + } + + [TestMethod] + public void DestinationFilePath_PropertyGetter_ReturnsCorrectValue() + { + // Arrange + var expectedPath = "test-file.dat"; + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: expectedPath); + + // Act + var actualPath = config.DestinationFilePath; + + // Assert + Assert.AreEqual(expectedPath, actualPath); + } + + [TestMethod] + public void ConcurrentServiceRequests_InheritsFromBase() + { + // Arrange + var expectedValue = 15; + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + concurrentRequests: expectedValue); + + // Act + var actualValue = config.ConcurrentServiceRequests; + + // Assert + Assert.AreEqual(expectedValue, actualValue); + } + + [TestMethod] + public void TargetPartSizeBytes_InheritsFromBase() + { + // Arrange + var expectedValue = 16 * 1024 * 1024; + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + partSize: expectedValue); + + // Act + var actualValue = config.TargetPartSizeBytes; + + // Assert + Assert.AreEqual(expectedValue, actualValue); + } + + #endregion + + #region Edge Case Tests + + [TestMethod] + public void Constructor_WithMinimumValidValues_CreatesConfiguration() + { + // Arrange & Act + var config = new FileDownloadConfiguration(1, 1, 1, "a"); + + // Assert + Assert.IsNotNull(config); + Assert.AreEqual(1, config.ConcurrentServiceRequests); + Assert.AreEqual(1, config.BufferSize); + Assert.AreEqual(1, config.TargetPartSizeBytes); + Assert.AreEqual("a", config.DestinationFilePath); + } + + [TestMethod] + public void Constructor_WithLargeBufferSize_CreatesConfiguration() + { + // Arrange + var largeBufferSize = 1024 * 1024; // 1MB buffer + + // Act + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + bufferSize: largeBufferSize); + + // Assert + Assert.AreEqual(largeBufferSize, config.BufferSize); + } + + [TestMethod] + public void Constructor_WithLargePartSize_CreatesConfiguration() + { + // Arrange + var largePartSize = 128L * 1024 * 1024; // 128MB + + // Act + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + partSize: largePartSize); + + // Assert + Assert.AreEqual(largePartSize, config.TargetPartSizeBytes); + } + + [TestMethod] + public void Constructor_WithVeryLongFilePath_CreatesConfiguration() + { + // Arrange - Create a long but valid path + var longFileName = new string('a', 200) + ".dat"; + var longPath = Path.Combine(Path.GetTempPath(), longFileName); + + // Act + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: longPath); + + // Assert + Assert.AreEqual(longPath, config.DestinationFilePath); + } + + [TestMethod] + public void Constructor_WithFilePathContainingSpecialCharacters_CreatesConfiguration() + { + // Arrange + var specialPath = Path.Combine(Path.GetTempPath(), "test-file[1]@2024.dat"); + + // Act + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: specialPath); + + // Assert + Assert.AreEqual(specialPath, config.DestinationFilePath); + } + + [TestMethod] + public void Constructor_WithUNCPath_CreatesConfiguration() + { + // Arrange + var uncPath = @"\\server\share\file.dat"; + + // Act + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: uncPath); + + // Assert + Assert.AreEqual(uncPath, config.DestinationFilePath); + } + + [TestMethod] + public void 
Constructor_WithRelativePath_CreatesConfiguration() + { + // Arrange + var relativePath = @".\subfolder\file.dat"; + + // Act + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: relativePath); + + // Assert + Assert.AreEqual(relativePath, config.DestinationFilePath); + } + + #endregion + } +} diff --git a/sdk/test/Services/S3/UnitTests/Custom/FilePartDataHandlerConcurrencyTests.cs b/sdk/test/Services/S3/UnitTests/Custom/FilePartDataHandlerConcurrencyTests.cs new file mode 100644 index 000000000000..819ea95b2f2a --- /dev/null +++ b/sdk/test/Services/S3/UnitTests/Custom/FilePartDataHandlerConcurrencyTests.cs @@ -0,0 +1,367 @@ +using Amazon.S3.Model; +using Amazon.S3.Transfer.Internal; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; + +namespace AWSSDK.UnitTests +{ + /// <summary> + /// Comprehensive concurrency tests for FilePartDataHandler to validate that concurrent file writes + /// using FileShare.Write and different offsets don't cause data corruption. + /// + /// Each test verifies every byte matches expected patterns after concurrent writes complete. + /// </summary> + [TestClass] + public class FilePartDataHandlerConcurrencyTests + { + private string _testDirectory; + + [TestInitialize] + public void Setup() + { + _testDirectory = MultipartDownloadTestHelpers.CreateTempDirectory(); + } + + [TestCleanup] + public void Cleanup() + { + MultipartDownloadTestHelpers.CleanupTempDirectory(_testDirectory); + } + + #region Helper Methods + + /// <summary> + /// Executes a concurrent write test with the specified parameters and returns the final file data. + /// </summary> + /// <param name="fileName">Name of the file to create</param> + /// <param name="partSize">Size of each part in bytes</param> + /// <param name="partCount">Number of parts to write</param> + /// <param name="writeOrder">Optional custom write order. If null, writes parts sequentially (1,2,3...)</param> + /// <returns>The final file data as byte array</returns> + private async Task<byte[]> ExecuteConcurrentWriteTest( + string fileName, + int partSize, + int partCount, + int[] writeOrder = null) + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, fileName); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + partSize: partSize, + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); + + // Determine write order (default to sequential if not specified) + var order = writeOrder ?? Enumerable.Range(1, partCount).ToArray(); + + // Act - Create and execute all write tasks concurrently + var tasks = new Task[partCount]; + for (int i = 0; i < partCount; i++) + { + var partNum = order[i]; + var offset = (partNum - 1) * partSize; + var partData = MultipartDownloadTestHelpers.GeneratePartSpecificData(partSize, partNum); + + var response = new GetObjectResponse + { + ContentLength = partData.Length, + ResponseStream = new MemoryStream(partData), + ContentRange = $"bytes {offset}-{offset + partSize - 1}/{partCount * partSize}" + }; + + tasks[i] = handler.ProcessPartAsync(partNum, response, CancellationToken.None); + } + + await Task.WhenAll(tasks); + handler.OnDownloadComplete(null); + + // Return the final file data for verification + Assert.IsTrue(File.Exists(destinationPath), "Destination file should exist"); + return File.ReadAllBytes(destinationPath); + } + + /// <summary> + /// Executes a concurrent write test with varying part sizes and returns the final file data.
+ /// </summary> + private async Task<byte[]> ExecuteVaryingSizeTest( + string fileName, + (int PartNum, int Size, int Offset)[] partDefinitions, + int[] writeOrder = null) + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, fileName); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); + + var totalSize = partDefinitions.Sum(p => p.Size); + var order = writeOrder ?? Enumerable.Range(0, partDefinitions.Length).ToArray(); + + // Act - Write parts with varying sizes + var tasks = new Task[partDefinitions.Length]; + for (int i = 0; i < order.Length; i++) + { + var partIdx = order[i]; + var part = partDefinitions[partIdx]; + var partData = MultipartDownloadTestHelpers.GeneratePartSpecificData(part.Size, part.PartNum); + + var response = new GetObjectResponse + { + ContentLength = partData.Length, + ResponseStream = new MemoryStream(partData), + ContentRange = $"bytes {part.Offset}-{part.Offset + part.Size - 1}/{totalSize}" + }; + + tasks[i] = handler.ProcessPartAsync(part.PartNum, response, CancellationToken.None); + } + + await Task.WhenAll(tasks); + handler.OnDownloadComplete(null); + + Assert.IsTrue(File.Exists(destinationPath)); + return File.ReadAllBytes(destinationPath); + } + + /// <summary> + /// Verifies that every byte in the file matches the expected pattern for uniform part sizes. + /// </summary> + private void VerifyAllBytes(byte[] fileData, int partSize, int partCount) + { + Assert.AreEqual(partCount * partSize, fileData.Length, "File size mismatch"); + + for (int i = 0; i < partCount; i++) + { + var expectedData = MultipartDownloadTestHelpers.GeneratePartSpecificData(partSize, i + 1); + var actualData = fileData.Skip(i * partSize).Take(partSize).ToArray(); + CollectionAssert.AreEqual(expectedData, actualData, $"Part {i + 1} data corrupted"); + } + } + + /// <summary> + /// Verifies that every byte in the file matches the expected pattern for varying part sizes.
+ /// </summary> + private void VerifyVaryingSizeBytes(byte[] fileData, (int PartNum, int Size, int Offset)[] partDefinitions) + { + var totalSize = partDefinitions.Sum(p => p.Size); + Assert.AreEqual(totalSize, fileData.Length, "File size mismatch"); + + foreach (var part in partDefinitions) + { + var expectedData = MultipartDownloadTestHelpers.GeneratePartSpecificData(part.Size, part.PartNum); + var actualData = fileData.Skip(part.Offset).Take(part.Size).ToArray(); + CollectionAssert.AreEqual(expectedData, actualData, + $"Part {part.PartNum} (size {part.Size} at offset {part.Offset}) corrupted"); + } + } + + #endregion + + #region Sequential Order Tests + + [TestMethod] + public async Task ConcurrentWrites_SequentialOrder_VerifyEveryByte() + { + // Act - Write parts in sequential order (1, 2, 3, 4, 5) concurrently + var partSize = 4096; // 4KB per part + var partCount = 5; + var fileData = await ExecuteConcurrentWriteTest("sequential.dat", partSize, partCount); + + // Assert - Verify temp files cleaned up and every byte matches expected pattern + var tempFiles = Directory.GetFiles(_testDirectory, "*.s3tmp.*"); + Assert.AreEqual(0, tempFiles.Length, "Temp files should be cleaned up after commit"); + + VerifyAllBytes(fileData, partSize, partCount); + } + + [TestMethod] + public async Task ConcurrentWrites_SequentialOrder_LargeParts_VerifyEveryByte() + { + // Act - Write large parts concurrently in sequential order + var partSize = 1024 * 1024; // 1MB per part + var partCount = 3; + var fileData = await ExecuteConcurrentWriteTest("sequential-large.dat", partSize, partCount); + + // Assert - Verify every byte + VerifyAllBytes(fileData, partSize, partCount); + } + + #endregion + + #region Reverse Order Tests + + [TestMethod] + public async Task ConcurrentWrites_ReverseOrder_VerifyEveryByte() + { + // Act - Write parts in reverse order (5, 4, 3, 2, 1) concurrently + var partSize = 4096; // 4KB per part + var partCount = 5; + var reverseOrder = Enumerable.Range(1, partCount).Reverse().ToArray(); + var fileData = await ExecuteConcurrentWriteTest("reverse.dat", partSize, partCount, reverseOrder); + + // Assert - Verify every byte matches expected pattern + VerifyAllBytes(fileData, partSize, partCount); + } + + [TestMethod] + public async Task ConcurrentWrites_ReverseOrder_LargeParts_VerifyEveryByte() + { + // Act - Write large parts in reverse order concurrently + var partSize = 1024 * 1024; // 1MB per part + var partCount = 3; + var reverseOrder = Enumerable.Range(1, partCount).Reverse().ToArray(); + var fileData = await ExecuteConcurrentWriteTest("reverse-large.dat", partSize, partCount, reverseOrder); + + // Assert - Verify every byte + VerifyAllBytes(fileData, partSize, partCount); + } + + #endregion + + #region Random Order Tests + + [TestMethod] + public async Task ConcurrentWrites_RandomOrder_VerifyEveryByte() + { + // Act - Write parts in random order (3, 1, 7, 2, 5, 8, 4, 6) concurrently + var partSize = 4096; // 4KB per part + var partCount = 8; + var randomOrder = new[] { 3, 1, 7, 2, 5, 8, 4, 6 }; + var fileData = await ExecuteConcurrentWriteTest("random.dat", partSize, partCount, randomOrder); + + // Assert - Verify every byte matches expected pattern + VerifyAllBytes(fileData, partSize, partCount); + } + + [TestMethod] + public async Task ConcurrentWrites_ComplexRandomOrder_VerifyEveryByte() + { + // Act - Write parts in complex random order concurrently + var partSize = 8192; // 8KB per part + var partCount = 12; + var randomOrder = new[] { 7, 2, 11, 4, 1, 9, 12, 3, 6, 10, 5, 8 };
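A note on why the byte-for-byte checks in this class can pass without any locking: the class summary names the pattern — each part write uses FileShare.Write and its own offset, so writers never overlap. A minimal standalone sketch of that idea, assuming FilePartDataHandler does something equivalent internally (the file name, sizes, and fill pattern here are illustrative, not the handler's actual code):

using System;
using System.IO;
using System.Linq;
using System.Threading.Tasks;

class PositionalWriteSketch
{
    static async Task Main()
    {
        var path = Path.Combine(Path.GetTempPath(), "positional-demo.dat");
        File.Delete(path); // start clean; no-op when the file does not exist
        const int partSize = 4096;
        const int partCount = 5;

        // Each task opens its own handle; FileShare.Write lets the handles coexist,
        // and seeking to disjoint offsets means no byte range is ever contested.
        var tasks = Enumerable.Range(1, partCount).Select(partNum => Task.Run(() =>
        {
            using (var fs = new FileStream(path, FileMode.OpenOrCreate, FileAccess.Write, FileShare.Write))
            {
                fs.Seek((partNum - 1) * (long)partSize, SeekOrigin.Begin);
                var data = new byte[partSize];
                for (int i = 0; i < data.Length; i++) data[i] = (byte)partNum; // distinct per-part fill
                fs.Write(data, 0, data.Length);
            }
        }));

        await Task.WhenAll(tasks);
        Console.WriteLine($"Wrote {new FileInfo(path).Length} bytes"); // expect partCount * partSize
    }
}

Because the ranges are disjoint, write order is irrelevant; the filesystem extends the file as needed when a later offset happens to land first, which is exactly what the reverse- and random-order tests in this class bank on.

+ var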
fileData = await ExecuteConcurrentWriteTest("complex-random.dat", partSize, partCount, randomOrder); + + // Assert - Verify every byte + VerifyAllBytes(fileData, partSize, partCount); + } + + #endregion + + #region High Concurrency Tests + + [TestMethod] + public async Task ConcurrentWrites_TwentyParts_VerifyEveryByte() + { + // Act - Write 20 parts concurrently in random order + var partSize = 4096; // 4KB per part + var partCount = 20; + var randomOrder = Enumerable.Range(1, partCount).OrderBy(x => Guid.NewGuid()).ToArray(); + var fileData = await ExecuteConcurrentWriteTest("twenty-parts.dat", partSize, partCount, randomOrder); + + // Assert - Verify every byte across all 20 parts + VerifyAllBytes(fileData, partSize, partCount); + } + + [TestMethod] + public async Task ConcurrentWrites_FiftyParts_VerifyEveryByte() + { + // Act - Write 50 parts concurrently in random order + var partSize = 2048; // 2KB per part (smaller to keep test fast) + var partCount = 50; + var randomOrder = Enumerable.Range(1, partCount).OrderBy(x => Guid.NewGuid()).ToArray(); + var fileData = await ExecuteConcurrentWriteTest("fifty-parts.dat", partSize, partCount, randomOrder); + + // Assert - Verify every byte across all 50 parts + VerifyAllBytes(fileData, partSize, partCount); + } + + [TestMethod] + public async Task ConcurrentWrites_HighConcurrency_StressTest_VerifyEveryByte() + { + // Act - Write all parts concurrently with maximum parallelism (stress test) + var partSize = 4096; // 4KB per part + var partCount = 30; + var randomOrder = Enumerable.Range(1, partCount).OrderBy(x => Guid.NewGuid()).ToArray(); + var fileData = await ExecuteConcurrentWriteTest("stress-test.dat", partSize, partCount, randomOrder); + + // Assert - Verify every byte even under high contention + VerifyAllBytes(fileData, partSize, partCount); + } + + #endregion + + #region Varying Part Size Tests + + [TestMethod] + public async Task ConcurrentWrites_VaryingPartSizes_VerifyEveryByte() + { + // Act - Write parts with varying sizes (1KB, 4KB, 8KB, 2KB, 16KB) concurrently + var partSizes = new[] { 1024, 4096, 8192, 2048, 16384 }; + var offset = 0; + var partDefinitions = partSizes.Select((size, i) => + { + var part = (PartNum: i + 1, Size: size, Offset: offset); + offset += size; + return part; + }).ToArray(); + + var fileData = await ExecuteVaryingSizeTest("varying-sizes.dat", partDefinitions); + + // Assert - Verify every byte with varying sizes + VerifyVaryingSizeBytes(fileData, partDefinitions); + } + + [TestMethod] + public async Task ConcurrentWrites_VaryingSizesRandomOrder_VerifyEveryByte() + { + // Act - Write varying size parts in random order + var partDefinitions = new[] + { + (PartNum: 1, Size: 2048, Offset: 0), + (PartNum: 2, Size: 8192, Offset: 2048), + (PartNum: 3, Size: 4096, Offset: 10240), + (PartNum: 4, Size: 16384, Offset: 14336), + (PartNum: 5, Size: 1024, Offset: 30720) + }; + var randomOrder = Enumerable.Range(0, partDefinitions.Length).OrderBy(x => Guid.NewGuid()).ToArray(); + var fileData = await ExecuteVaryingSizeTest("varying-sizes-random.dat", partDefinitions, randomOrder); + + // Assert - Verify every byte across varying sizes + VerifyVaryingSizeBytes(fileData, partDefinitions); + } + + #endregion + + #region Mixed Scenario Tests + + [TestMethod] + public async Task ConcurrentWrites_MixedScenario_SmallAndLargeParts_VerifyEveryByte() + { + // Act - Write mixed size parts (100 bytes, 1MB, 500 bytes, 2MB, 1KB) in random order + var partDefinitions = new[] + { + (PartNum: 1, Size: 100, Offset: 0), + (PartNum: 2, 
Size: 1024 * 1024, Offset: 100), + (PartNum: 3, Size: 500, Offset: 100 + 1024 * 1024), + (PartNum: 4, Size: 2 * 1024 * 1024, Offset: 100 + 1024 * 1024 + 500), + (PartNum: 5, Size: 1024, Offset: 100 + 1024 * 1024 + 500 + 2 * 1024 * 1024) + }; + var randomOrder = Enumerable.Range(0, partDefinitions.Length).OrderBy(x => Guid.NewGuid()).ToArray(); + var fileData = await ExecuteVaryingSizeTest("mixed-scenario.dat", partDefinitions, randomOrder); + + // Assert - Verify every byte in mixed scenario + VerifyVaryingSizeBytes(fileData, partDefinitions); + } + + #endregion + } +} diff --git a/sdk/test/Services/S3/UnitTests/Custom/FilePartDataHandlerTests.cs b/sdk/test/Services/S3/UnitTests/Custom/FilePartDataHandlerTests.cs new file mode 100644 index 000000000000..46c2a7536bb4 --- /dev/null +++ b/sdk/test/Services/S3/UnitTests/Custom/FilePartDataHandlerTests.cs @@ -0,0 +1,1017 @@ +using Amazon.S3.Model; +using Amazon.S3.Transfer.Internal; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using System; +using System.IO; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; + +namespace AWSSDK.UnitTests +{ + [TestClass] + public class FilePartDataHandlerTests + { + private string _testDirectory; + + [TestInitialize] + public void Setup() + { + _testDirectory = MultipartDownloadTestHelpers.CreateTempDirectory(); + } + + [TestCleanup] + public void Cleanup() + { + MultipartDownloadTestHelpers.CleanupTempDirectory(_testDirectory); + } + + #region Constructor Tests + + [TestMethod] + public void Constructor_WithValidConfig_CreatesHandler() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: Path.Combine(_testDirectory, "test.dat")); + + // Act + var handler = new FilePartDataHandler(config); + + // Assert + Assert.IsNotNull(handler); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentNullException))] + public void Constructor_WithNullConfig_ThrowsArgumentNullException() + { + // Act + var handler = new FilePartDataHandler(null); + } + + #endregion + + #region PrepareAsync Tests + + [TestMethod] + public async Task PrepareAsync_CreatesTempFile() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + var discoveryResult = new DownloadResult + { + TotalParts = 1, + ObjectSize = 1024 + }; + + // Act + await handler.PrepareAsync(discoveryResult, CancellationToken.None); + + // Assert - Check temp file exists with .s3tmp. 
pattern + var tempFiles = Directory.GetFiles(_testDirectory, "*.s3tmp.*"); + Assert.AreEqual(1, tempFiles.Length); + Assert.IsTrue(File.Exists(tempFiles[0])); + } + + [TestMethod] + public async Task PrepareAsync_TempFileFollowsPattern() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "myfile.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + var discoveryResult = new DownloadResult(); + + // Act + await handler.PrepareAsync(discoveryResult, CancellationToken.None); + + // Assert + var tempFiles = Directory.GetFiles(_testDirectory, "myfile.dat.s3tmp.*"); + Assert.AreEqual(1, tempFiles.Length); + } + + [TestMethod] + public async Task PrepareAsync_ReturnsCompletedTask() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: Path.Combine(_testDirectory, "test.dat")); + var handler = new FilePartDataHandler(config); + var discoveryResult = new DownloadResult(); + + // Act + var task = handler.PrepareAsync(discoveryResult, CancellationToken.None); + + // Assert + Assert.IsTrue(task.IsCompleted); + await task; + } + + #endregion + + #region ProcessPartAsync Tests - Basic Functionality + + [TestMethod] + public async Task ProcessPartAsync_WritesDataToFile() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); + + var partData = MultipartDownloadTestHelpers.GenerateTestData(1024, 0); + var response = new GetObjectResponse + { + ContentLength = partData.Length, + ResponseStream = new MemoryStream(partData), + ContentRange = "bytes 0-1023/1024" + }; + + // Act + await handler.ProcessPartAsync(1, response, CancellationToken.None); + + // Assert + var tempFiles = Directory.GetFiles(_testDirectory, "*.s3tmp.*"); + Assert.AreEqual(1, tempFiles.Length); + + var writtenData = File.ReadAllBytes(tempFiles[0]); + CollectionAssert.AreEqual(partData, writtenData); + } + + [TestMethod] + public async Task ProcessPartAsync_WritesAtCorrectOffset() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + partSize: 1024, + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); + + // Write part 2 (offset 1024) + var part2Data = MultipartDownloadTestHelpers.GenerateTestData(1024, 1024); + var response = new GetObjectResponse + { + ContentLength = part2Data.Length, + ResponseStream = new MemoryStream(part2Data), + ContentRange = "bytes 1024-2047/2048" + }; + + // Act + await handler.ProcessPartAsync(2, response, CancellationToken.None); + + // Assert + var tempFiles = Directory.GetFiles(_testDirectory, "*.s3tmp.*"); + var fileData = File.ReadAllBytes(tempFiles[0]); + + // Verify data is at offset 1024 + var actualPart2Data = fileData.Skip(1024).Take(1024).ToArray(); + CollectionAssert.AreEqual(part2Data, actualPart2Data); + } + + #endregion + + #region ProcessPartAsync Tests - Offset Calculation + + [TestMethod] + public async Task ProcessPartAsync_ParsesContentRangeForOffset() + { + // Arrange + var destinationPath = 
Path.Combine(_testDirectory, "test.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); + + var partData = MultipartDownloadTestHelpers.GenerateTestData(100, 0); + var response = new GetObjectResponse + { + ContentLength = partData.Length, + ResponseStream = new MemoryStream(partData), + ContentRange = "bytes 8388608-8388707/33555032" // Offset 8MB + }; + + // Act + await handler.ProcessPartAsync(2, response, CancellationToken.None); + + // Assert + var tempFiles = Directory.GetFiles(_testDirectory, "*.s3tmp.*"); + var fileData = File.ReadAllBytes(tempFiles[0]); + + // Verify data is at offset 8388608 + var actualData = fileData.Skip(8388608).Take(100).ToArray(); + CollectionAssert.AreEqual(partData, actualData); + } + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public async Task ProcessPartAsync_MissingContentRange_ThrowsInvalidOperationException() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + partSize: 1000, + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); + + var partData = MultipartDownloadTestHelpers.GenerateTestData(100, 0); + var response = new GetObjectResponse + { + ContentLength = partData.Length, + ResponseStream = new MemoryStream(partData), + ContentRange = null // Missing ContentRange should throw + }; + + // Act - Should throw InvalidOperationException + await handler.ProcessPartAsync(3, response, CancellationToken.None); + } + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public async Task ProcessPartAsync_InvalidContentRange_ThrowsInvalidOperationException() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + partSize: 1000, + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); + + var partData = MultipartDownloadTestHelpers.GenerateTestData(100, 0); + var response = new GetObjectResponse + { + ContentLength = partData.Length, + ResponseStream = new MemoryStream(partData), + ContentRange = "invalid-format" // Invalid ContentRange should throw + }; + + // Act - Should throw InvalidOperationException + await handler.ProcessPartAsync(2, response, CancellationToken.None); + } + + #endregion + + #region ProcessPartAsync Tests - Data Integrity + + [TestMethod] + public async Task ProcessPartAsync_PreservesDataIntegrity() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); + + var partData = MultipartDownloadTestHelpers.CreateMixedPattern(10240, 42); + var response = new GetObjectResponse + { + ContentLength = partData.Length, + ResponseStream = new MemoryStream(partData), + ContentRange = "bytes 0-10239/10240" + }; + + // Act + await handler.ProcessPartAsync(1, response, CancellationToken.None); + + // Assert + var 
tempFiles = Directory.GetFiles(_testDirectory, "*.s3tmp.*"); + var writtenData = File.ReadAllBytes(tempFiles[0]); + CollectionAssert.AreEqual(partData, writtenData); + } + + [TestMethod] + public async Task ProcessPartAsync_HandlesZeroByteResponse() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); + + var response = new GetObjectResponse + { + ContentLength = 0, + ResponseStream = new MemoryStream(Array.Empty<byte>()), + ContentRange = "bytes 0-0/0" + }; + + // Act & Assert - Should not throw + await handler.ProcessPartAsync(1, response, CancellationToken.None); + } + + [TestMethod] + public async Task ProcessPartAsync_HandlesSmallPart() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); + + var partData = MultipartDownloadTestHelpers.GenerateTestData(100, 0); + var response = new GetObjectResponse + { + ContentLength = partData.Length, + ResponseStream = new MemoryStream(partData), + ContentRange = "bytes 0-99/100" + }; + + // Act + await handler.ProcessPartAsync(1, response, CancellationToken.None); + + // Assert + var tempFiles = Directory.GetFiles(_testDirectory, "*.s3tmp.*"); + var writtenData = File.ReadAllBytes(tempFiles[0]); + CollectionAssert.AreEqual(partData, writtenData); + } + + [TestMethod] + public async Task ProcessPartAsync_HandlesLargePart() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); + + var partSize = 16 * 1024 * 1024; // 16MB + var partData = MultipartDownloadTestHelpers.GenerateTestData(partSize, 0); + var response = new GetObjectResponse + { + ContentLength = partData.Length, + ResponseStream = new MemoryStream(partData), + ContentRange = $"bytes 0-{partSize - 1}/{partSize}" + }; + + // Act + await handler.ProcessPartAsync(1, response, CancellationToken.None); + + // Assert + var tempFiles = Directory.GetFiles(_testDirectory, "*.s3tmp.*"); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyFileSize(tempFiles[0], partSize)); + } + + [TestMethod] + public async Task ProcessPartAsync_MultipleWritesPreserveAllData() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + partSize: 1024, + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); + + // Write part 1 + var part1Data = MultipartDownloadTestHelpers.GenerateTestData(1024, 0); + var response1 = new GetObjectResponse + { + ContentLength = part1Data.Length, + ResponseStream = new MemoryStream(part1Data), + ContentRange = "bytes 0-1023/2048" + }; + await handler.ProcessPartAsync(1, response1, CancellationToken.None); +
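The Offset Calculation tests above pin down the offset contract: ProcessPartAsync must take the write position from the response's ContentRange header rather than trusting the part number, and a missing or unparsable header surfaces as InvalidOperationException. A sketch of parsing consistent with those tests (the method name and messages are illustrative; only the HTTP "bytes start-end/total" shape is a given):

using System;
using System.Globalization;

static class ContentRangeSketch
{
    // Extracts the starting byte offset from a header such as "bytes 8388608-8388707/33555032".
    public static long GetStartOffset(string contentRange)
    {
        if (string.IsNullOrEmpty(contentRange))
            throw new InvalidOperationException("Response is missing the Content-Range header.");

        var parts = contentRange.Split(' ');           // ["bytes", "<start>-<end>/<total>"]
        if (parts.Length != 2 || parts[0] != "bytes")
            throw new InvalidOperationException($"Unexpected Content-Range format: '{contentRange}'.");

        var startText = parts[1].Split('/')[0].Split('-')[0];
        if (!long.TryParse(startText, NumberStyles.None, CultureInfo.InvariantCulture, out var start))
            throw new InvalidOperationException($"Unexpected Content-Range format: '{contentRange}'.");

        return start;
    }
}

A handler can then seek to GetStartOffset(response.ContentRange) before copying the response stream, which is why part 2 landed at byte 1024 and the 8MB-offset write landed at byte 8388608 in the tests above.

+
+ // Write part 2
+ var part2Data =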
MultipartDownloadTestHelpers.GenerateTestData(1024, 1024); + var response2 = new GetObjectResponse + { + ContentLength = part2Data.Length, + ResponseStream = new MemoryStream(part2Data), + ContentRange = "bytes 1024-2047/2048" + }; + await handler.ProcessPartAsync(2, response2, CancellationToken.None); + + // Assert + var tempFiles = Directory.GetFiles(_testDirectory, "*.s3tmp.*"); + var fileData = File.ReadAllBytes(tempFiles[0]); + + var actualPart1 = fileData.Take(1024).ToArray(); + var actualPart2 = fileData.Skip(1024).Take(1024).ToArray(); + + CollectionAssert.AreEqual(part1Data, actualPart1); + CollectionAssert.AreEqual(part2Data, actualPart2); + } + + #endregion + + #region ProcessPartAsync Tests - Concurrent Writes + + [TestMethod] + public async Task ProcessPartAsync_SupportsConcurrentWrites() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + partSize: 1024, + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); + + // Create multiple parts + var part1Data = MultipartDownloadTestHelpers.GenerateTestData(1024, 0); + var part2Data = MultipartDownloadTestHelpers.GenerateTestData(1024, 1024); + var part3Data = MultipartDownloadTestHelpers.GenerateTestData(1024, 2048); + + var response1 = new GetObjectResponse + { + ContentLength = part1Data.Length, + ResponseStream = new MemoryStream(part1Data), + ContentRange = "bytes 0-1023/3072" + }; + var response2 = new GetObjectResponse + { + ContentLength = part2Data.Length, + ResponseStream = new MemoryStream(part2Data), + ContentRange = "bytes 1024-2047/3072" + }; + var response3 = new GetObjectResponse + { + ContentLength = part3Data.Length, + ResponseStream = new MemoryStream(part3Data), + ContentRange = "bytes 2048-3071/3072" + }; + + // Act - Write all parts concurrently + var tasks = new[] + { + handler.ProcessPartAsync(1, response1, CancellationToken.None), + handler.ProcessPartAsync(2, response2, CancellationToken.None), + handler.ProcessPartAsync(3, response3, CancellationToken.None) + }; + await Task.WhenAll(tasks); + + // Assert + var tempFiles = Directory.GetFiles(_testDirectory, "*.s3tmp.*"); + var fileData = File.ReadAllBytes(tempFiles[0]); + + var actualPart1 = fileData.Take(1024).ToArray(); + var actualPart2 = fileData.Skip(1024).Take(1024).ToArray(); + var actualPart3 = fileData.Skip(2048).Take(1024).ToArray(); + + CollectionAssert.AreEqual(part1Data, actualPart1); + CollectionAssert.AreEqual(part2Data, actualPart2); + CollectionAssert.AreEqual(part3Data, actualPart3); + } + + [TestMethod] + public async Task ProcessPartAsync_ConcurrentWritesDontInterfere() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + partSize: 1024, + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); + + // Create 10 parts with distinct patterns + var tasks = new Task[10]; + for (int i = 0; i < 10; i++) + { + var partNum = i + 1; + var offset = i * 1024; + var partData = MultipartDownloadTestHelpers.GeneratePartSpecificData(1024, partNum); + + var response = new GetObjectResponse + { + ContentLength = partData.Length, + ResponseStream = new MemoryStream(partData), + ContentRange = $"bytes {offset}-{offset + 
1023}/10240" + }; + + tasks[i] = handler.ProcessPartAsync(partNum, response, CancellationToken.None); + } + + // Act + await Task.WhenAll(tasks); + + // Assert - Each part should have its distinct pattern + var tempFiles = Directory.GetFiles(_testDirectory, "*.s3tmp.*"); + var fileData = File.ReadAllBytes(tempFiles[0]); + + for (int i = 0; i < 10; i++) + { + var expectedData = MultipartDownloadTestHelpers.GeneratePartSpecificData(1024, i + 1); + var actualData = fileData.Skip(i * 1024).Take(1024).ToArray(); + CollectionAssert.AreEqual(expectedData, actualData, $"Part {i + 1} data mismatch"); + } + } + + #endregion + + #region ProcessPartAsync Tests - Error Handling + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public async Task ProcessPartAsync_WithoutPrepare_ThrowsInvalidOperationException() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: Path.Combine(_testDirectory, "test.dat")); + var handler = new FilePartDataHandler(config); + + var partData = MultipartDownloadTestHelpers.GenerateTestData(1024, 0); + var response = new GetObjectResponse + { + ContentLength = partData.Length, + ResponseStream = new MemoryStream(partData), + ContentRange = "bytes 0-1023/1024" + }; + + // Act - Without calling PrepareAsync first + await handler.ProcessPartAsync(1, response, CancellationToken.None); + } + + #endregion + + #region ProcessPartAsync Tests - Cancellation + + [TestMethod] + [ExpectedException(typeof(TaskCanceledException))] + public async Task ProcessPartAsync_WithCancelledToken_ThrowsTaskCanceledException() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); + + var partData = MultipartDownloadTestHelpers.GenerateTestData(1024, 0); + var response = new GetObjectResponse + { + ContentLength = partData.Length, + ResponseStream = new MemoryStream(partData), + ContentRange = "bytes 0-1023/1024" + }; + + var cts = new CancellationTokenSource(); + cts.Cancel(); + + // Act + await handler.ProcessPartAsync(1, response, cts.Token); + } + + #endregion + + #region WaitForCapacityAsync Tests + + [TestMethod] + public async Task WaitForCapacityAsync_ReturnsImmediately() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: Path.Combine(_testDirectory, "test.dat")); + var handler = new FilePartDataHandler(config); + + // Act + var task = handler.WaitForCapacityAsync(CancellationToken.None); + + // Assert + Assert.IsTrue(task.IsCompleted); + await task; + } + + [TestMethod] + public async Task WaitForCapacityAsync_CanBeCalledMultipleTimes() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: Path.Combine(_testDirectory, "test.dat")); + var handler = new FilePartDataHandler(config); + + // Act & Assert + await handler.WaitForCapacityAsync(CancellationToken.None); + await handler.WaitForCapacityAsync(CancellationToken.None); + await handler.WaitForCapacityAsync(CancellationToken.None); + } + + #endregion + + #region ReleaseCapacity Tests + + [TestMethod] + public void ReleaseCapacity_DoesNotThrow() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: 
Path.Combine(_testDirectory, "test.dat")); + var handler = new FilePartDataHandler(config); + + // Act & Assert + handler.ReleaseCapacity(); + } + + [TestMethod] + public void ReleaseCapacity_CanBeCalledMultipleTimes() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: Path.Combine(_testDirectory, "test.dat")); + var handler = new FilePartDataHandler(config); + + // Act & Assert + handler.ReleaseCapacity(); + handler.ReleaseCapacity(); + handler.ReleaseCapacity(); + } + + #endregion + + #region OnDownloadComplete Tests - Success Path + + [TestMethod] + public async Task OnDownloadComplete_WithSuccess_CommitsTempFile() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "final.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); + + var partData = MultipartDownloadTestHelpers.GenerateTestData(1024, 0); + var response = new GetObjectResponse + { + ContentLength = partData.Length, + ResponseStream = new MemoryStream(partData), + ContentRange = "bytes 0-1023/1024" + }; + await handler.ProcessPartAsync(1, response, CancellationToken.None); + + // Act + handler.OnDownloadComplete(null); // null = success + + // Assert + Assert.IsTrue(File.Exists(destinationPath)); + Assert.AreEqual(0, Directory.GetFiles(_testDirectory, "*.s3tmp.*").Length); + + var finalData = File.ReadAllBytes(destinationPath); + CollectionAssert.AreEqual(partData, finalData); + } + + [TestMethod] + public async Task OnDownloadComplete_WithSuccess_DestinationContainsAllData() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "complete.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + partSize: 1024, + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); + + // Write 3 parts + for (int i = 0; i < 3; i++) + { + var partData = MultipartDownloadTestHelpers.GenerateTestData(1024, i * 1024); + var response = new GetObjectResponse + { + ContentLength = partData.Length, + ResponseStream = new MemoryStream(partData), + ContentRange = $"bytes {i * 1024}-{(i + 1) * 1024 - 1}/3072" + }; + await handler.ProcessPartAsync(i + 1, response, CancellationToken.None); + } + + // Act + handler.OnDownloadComplete(null); + + // Assert + Assert.IsTrue(File.Exists(destinationPath)); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyMultipartFileContents( + destinationPath, 3, 1024, 0)); + } + + #endregion + + #region OnDownloadComplete Tests - Failure Path + + [TestMethod] + public async Task OnDownloadComplete_WithFailure_CleansTempFile() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "failed.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); + + // Act + handler.OnDownloadComplete(new Exception("Download failed")); + + // Assert + Assert.IsFalse(File.Exists(destinationPath)); + Assert.AreEqual(0, Directory.GetFiles(_testDirectory, "*.s3tmp.*").Length); + } + + [TestMethod] + public async Task OnDownloadComplete_WithDifferentExceptions_AllHandledCorrectly() + { + // Test with OperationCanceledException 
+ var destinationPath1 = Path.Combine(_testDirectory, "cancelled.dat"); + var config1 = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: destinationPath1); + var handler1 = new FilePartDataHandler(config1); + await handler1.PrepareAsync(new DownloadResult(), CancellationToken.None); + handler1.OnDownloadComplete(new OperationCanceledException()); + Assert.IsFalse(File.Exists(destinationPath1)); + + // Test with IOException + var destinationPath2 = Path.Combine(_testDirectory, "ioerror.dat"); + var config2 = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: destinationPath2); + var handler2 = new FilePartDataHandler(config2); + await handler2.PrepareAsync(new DownloadResult(), CancellationToken.None); + handler2.OnDownloadComplete(new IOException("IO error")); + Assert.IsFalse(File.Exists(destinationPath2)); + } + + #endregion + + #region Dispose Tests + + [TestMethod] + public async Task Dispose_CleansUpUncommittedFile() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "disposed.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); + + // Act + handler.Dispose(); + + // Assert - Temp file should be cleaned up, destination should not exist + Assert.AreEqual(0, Directory.GetFiles(_testDirectory, "*.s3tmp.*").Length); + Assert.IsFalse(File.Exists(destinationPath)); + } + + [TestMethod] + public async Task Dispose_AfterCommit_DoesNotDeleteDestination() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "committed.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); + + var partData = MultipartDownloadTestHelpers.GenerateTestData(1024, 0); + var response = new GetObjectResponse + { + ContentLength = partData.Length, + ResponseStream = new MemoryStream(partData), + ContentRange = "bytes 0-1023/1024" + }; + await handler.ProcessPartAsync(1, response, CancellationToken.None); + + handler.OnDownloadComplete(null); // Commit + + // Act + handler.Dispose(); + + // Assert - Destination should still exist + Assert.IsTrue(File.Exists(destinationPath)); + var finalData = File.ReadAllBytes(destinationPath); + CollectionAssert.AreEqual(partData, finalData); + } + + [TestMethod] + public void Dispose_CanBeCalledMultipleTimes() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: Path.Combine(_testDirectory, "test.dat")); + var handler = new FilePartDataHandler(config); + + // Act & Assert - Multiple calls should not throw + handler.Dispose(); + handler.Dispose(); + handler.Dispose(); + } + + [TestMethod] + public void Dispose_WithoutPrepare_DoesNotThrow() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: Path.Combine(_testDirectory, "test.dat")); + var handler = new FilePartDataHandler(config); + + // Act & Assert - Should not throw even if PrepareAsync was never called + handler.Dispose(); + } + + #endregion + + #region Integration Tests + + [TestMethod] + public async Task Integration_CompleteWorkflow_ProducesCorrectFile() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, 
"integration.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + partSize: 1024, + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + // Act - Simulate complete download workflow + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); + + // Download 5 parts + for (int i = 0; i < 5; i++) + { + var partData = MultipartDownloadTestHelpers.GenerateTestData(1024, i * 1024); + var response = new GetObjectResponse + { + ContentLength = partData.Length, + ResponseStream = new MemoryStream(partData), + ContentRange = $"bytes {i * 1024}-{(i + 1) * 1024 - 1}/5120" + }; + await handler.ProcessPartAsync(i + 1, response, CancellationToken.None); + } + + handler.OnDownloadComplete(null); + handler.Dispose(); + + // Assert + Assert.IsTrue(File.Exists(destinationPath)); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyMultipartFileContents( + destinationPath, 5, 1024, 0)); + Assert.AreEqual(0, Directory.GetFiles(_testDirectory, "*.s3tmp.*").Length); + } + + [TestMethod] + public async Task Integration_ParallelDownload_ProducesCorrectFile() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "parallel.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + partSize: 1024, + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); + + // Act - Download parts in parallel (reverse order to test offset handling) + var tasks = new Task[5]; + for (int i = 4; i >= 0; i--) + { + var partNum = i + 1; + var offset = i * 1024; + var partData = MultipartDownloadTestHelpers.GenerateTestData(1024, offset); + var response = new GetObjectResponse + { + ContentLength = partData.Length, + ResponseStream = new MemoryStream(partData), + ContentRange = $"bytes {offset}-{offset + 1023}/5120" + }; + tasks[4 - i] = handler.ProcessPartAsync(partNum, response, CancellationToken.None); + } + await Task.WhenAll(tasks); + + handler.OnDownloadComplete(null); + handler.Dispose(); + + // Assert + Assert.IsTrue(File.Exists(destinationPath)); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyMultipartFileContents( + destinationPath, 5, 1024, 0)); + } + + [TestMethod] + public async Task Integration_FailedDownload_CleansUpProperly() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "failed-integration.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + // Act + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); + + var partData = MultipartDownloadTestHelpers.GenerateTestData(1024, 0); + var response = new GetObjectResponse + { + ContentLength = partData.Length, + ResponseStream = new MemoryStream(partData), + ContentRange = "bytes 0-1023/1024" + }; + await handler.ProcessPartAsync(1, response, CancellationToken.None); + + handler.OnDownloadComplete(new Exception("Simulated failure")); + handler.Dispose(); + + // Assert - No files should remain + Assert.IsFalse(File.Exists(destinationPath)); + Assert.AreEqual(0, Directory.GetFiles(_testDirectory, "*.s3tmp.*").Length); + } + + [TestMethod] + public async Task Integration_LargeFileDownload_HandlesCorrectly() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "large-integration.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + partSize: 
1024 * 1024, // 1MB parts + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); + + // Act - Download 3 parts of 1MB each + for (int i = 0; i < 3; i++) + { + var partSize = 1024 * 1024; + var offset = i * partSize; + var partData = MultipartDownloadTestHelpers.GenerateTestData(partSize, offset); + var response = new GetObjectResponse + { + ContentLength = partData.Length, + ResponseStream = new MemoryStream(partData), + ContentRange = $"bytes {offset}-{offset + partSize - 1}/{3 * partSize}" + }; + await handler.ProcessPartAsync(i + 1, response, CancellationToken.None); + } + + handler.OnDownloadComplete(null); + handler.Dispose(); + + // Assert + Assert.IsTrue(File.Exists(destinationPath)); + var expectedSize = 3 * 1024 * 1024; + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyFileSize(destinationPath, expectedSize)); + } + + [TestMethod] + public async Task Integration_SingleByteFile_HandlesCorrectly() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "single-byte.dat"); + var config = MultipartDownloadTestHelpers.CreateFileDownloadConfiguration( + destinationPath: destinationPath); + var handler = new FilePartDataHandler(config); + + await handler.PrepareAsync(new DownloadResult(), CancellationToken.None); + + // Act - Download single byte + var partData = new byte[] { 0x42 }; + var response = new GetObjectResponse + { + ContentLength = 1, + ResponseStream = new MemoryStream(partData), + ContentRange = "bytes 0-0/1" + }; + await handler.ProcessPartAsync(1, response, CancellationToken.None); + + handler.OnDownloadComplete(null); + handler.Dispose(); + + // Assert + Assert.IsTrue(File.Exists(destinationPath)); + var fileData = File.ReadAllBytes(destinationPath); + Assert.AreEqual(1, fileData.Length); + Assert.AreEqual(0x42, fileData[0]); + } + + #endregion + } +} diff --git a/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadCommandTests.cs b/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadCommandTests.cs new file mode 100644 index 000000000000..b0b0ebba9f5e --- /dev/null +++ b/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadCommandTests.cs @@ -0,0 +1,984 @@ +using Amazon.S3; +using Amazon.S3.Model; +using Amazon.S3.Transfer; +using Amazon.S3.Transfer.Internal; +using Amazon.S3.Util; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Moq; +using System; +using System.IO; +using System.Threading; +using System.Threading.Tasks; + +namespace AWSSDK.UnitTests +{ + [TestClass] + public class MultipartDownloadCommandTests + { + private string _testDirectory; + private Mock<IAmazonS3> _mockS3Client; + private TransferUtilityConfig _config; + + [TestInitialize] + public void Setup() + { + _testDirectory = MultipartDownloadTestHelpers.CreateTempDirectory(); + _mockS3Client = new Mock<IAmazonS3>(); + _config = new TransferUtilityConfig + { + ConcurrentServiceRequests = 4 + }; + + // Setup default S3 client config + var s3Config = new AmazonS3Config + { + BufferSize = 8192, + }; + _mockS3Client.Setup(c => c.Config).Returns(s3Config); + } + + [TestCleanup] + public void Cleanup() + { + MultipartDownloadTestHelpers.CleanupTempDirectory(_testDirectory); + } + + #region Constructor Tests + + [TestMethod] + public void Constructor_WithValidParameters_CreatesCommand() + { + // Arrange + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: Path.Combine(_testDirectory, "test.dat"));
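Most of the tests below drive MultipartDownloadCommand against this Moq-mocked IAmazonS3 through a SetupSuccessfulSinglePartDownload helper defined further down the test class. The essence of that helper is to program GetObjectAsync to hand back the whole object in a single response; a hypothetical minimal equivalent, to make the mock interaction concrete (the real helper also accepts ETag, checksum, and encryption arguments, as the later tests show):

using Amazon.S3;
using Amazon.S3.Model;
using Moq;
using System.IO;
using System.Threading;

static class MockSetupSketch
{
    // Programs a mocked IAmazonS3 so any GetObjectAsync call yields one complete object.
    public static void SetupWholeObject(Mock<IAmazonS3> s3, byte[] data)
    {
        s3.Setup(c => c.GetObjectAsync(It.IsAny<GetObjectRequest>(), It.IsAny<CancellationToken>()))
          .ReturnsAsync(() => new GetObjectResponse
          {
              ContentLength = data.Length,
              ResponseStream = new MemoryStream(data), // fresh stream per call, so repeated reads work
              ContentRange = $"bytes 0-{data.Length - 1}/{data.Length}"
          });
    }
}

+
+ // Act
+ var command = new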
MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Assert + Assert.IsNotNull(command); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentNullException))] + public void Constructor_WithNullS3Client_ThrowsArgumentNullException() + { + // Arrange + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: Path.Combine(_testDirectory, "test.dat")); + + // Act + var command = new MultipartDownloadCommand(null, request, _config); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentNullException))] + public void Constructor_WithNullRequest_ThrowsArgumentNullException() + { + // Act + var command = new MultipartDownloadCommand(_mockS3Client.Object, null, _config); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentNullException))] + public void Constructor_WithNullConfig_ThrowsArgumentNullException() + { + // Arrange + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: Path.Combine(_testDirectory, "test.dat")); + + // Act + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, null); + } + + [TestMethod] + public void Constructor_WithSharedHttpThrottler_CreatesCommand() + { + // Arrange + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: Path.Combine(_testDirectory, "test.dat")); + var sharedThrottler = new SemaphoreSlim(10); + + try + { + // Act + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config, sharedThrottler); + + // Assert + Assert.IsNotNull(command); + } + finally + { + sharedThrottler.Dispose(); + } + } + + [TestMethod] + public void Constructor_WithNullSharedHttpThrottler_CreatesCommand() + { + // Arrange + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: Path.Combine(_testDirectory, "test.dat")); + + // Act + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config, sharedHttpThrottler: null); + + // Assert + Assert.IsNotNull(command); + } + + #endregion + + #region ValidateRequest Tests + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public async Task ExecuteAsync_WithMissingBucketName_ThrowsInvalidOperationException() + { + // Arrange + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + bucketName: null, + filePath: Path.Combine(_testDirectory, "test.dat")); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + await command.ExecuteAsync(CancellationToken.None); + } + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public async Task ExecuteAsync_WithEmptyBucketName_ThrowsInvalidOperationException() + { + // Arrange + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + bucketName: "", + filePath: Path.Combine(_testDirectory, "test.dat")); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + await command.ExecuteAsync(CancellationToken.None); + } + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public async Task ExecuteAsync_WithMissingKey_ThrowsInvalidOperationException() + { + // Arrange + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + key: null, + filePath: Path.Combine(_testDirectory, "test.dat")); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + await command.ExecuteAsync(CancellationToken.None); + } + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public async Task 
ExecuteAsync_WithEmptyKey_ThrowsInvalidOperationException() + { + // Arrange + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + key: "", + filePath: Path.Combine(_testDirectory, "test.dat")); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + await command.ExecuteAsync(CancellationToken.None); + } + +#if BCL + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public async Task ExecuteAsync_WithMissingFilePath_ThrowsInvalidOperationException() + { + // Arrange + var request = MultipartDownloadTestHelpers.CreateDownloadRequest(filePath: null); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + await command.ExecuteAsync(CancellationToken.None); + } + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public async Task ExecuteAsync_WithEmptyFilePath_ThrowsInvalidOperationException() + { + // Arrange + var request = MultipartDownloadTestHelpers.CreateDownloadRequest(filePath: ""); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + await command.ExecuteAsync(CancellationToken.None); + } +#endif + + #endregion + + #region CreateConfiguration Tests + + [TestMethod] + public async Task ExecuteAsync_UsesRequestPartSize_WhenSet() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var customPartSize = 16 * 1024 * 1024; // 16MB + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath, + partSize: customPartSize); + + SetupSuccessfulSinglePartDownload(1024); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + await command.ExecuteAsync(CancellationToken.None); + + // Assert - Verify coordinator was called (validates config was created) + _mockS3Client.Verify(c => c.GetObjectAsync( + It.IsAny<GetObjectRequest>(), + It.IsAny<CancellationToken>()), Times.Once); + } + + [TestMethod] + public async Task ExecuteAsync_UsesDefaultPartSize_WhenNotSet() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + // Don't set PartSize - should use 8MB default + + SetupSuccessfulSinglePartDownload(1024); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + await command.ExecuteAsync(CancellationToken.None); + + // Assert - Verify coordinator was called + _mockS3Client.Verify(c => c.GetObjectAsync( + It.IsAny<GetObjectRequest>(), + It.IsAny<CancellationToken>()), Times.Once); + } + + [TestMethod] + public async Task ExecuteAsync_UsesConcurrentRequestsFromConfig() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + _config.ConcurrentServiceRequests = 10; + + SetupSuccessfulSinglePartDownload(1024); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + await command.ExecuteAsync(CancellationToken.None); + + // Assert + _mockS3Client.Verify(c => c.GetObjectAsync( + It.IsAny<GetObjectRequest>(), + It.IsAny<CancellationToken>()), Times.Once); + } + + [TestMethod] + public async Task ExecuteAsync_UsesBufferSizeFromS3ClientConfig() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + + var s3Config = new AmazonS3Config + { + BufferSize = 16384, // Custom buffer size + }; + _mockS3Client.Setup(c => c.Config).Returns(s3Config); + + SetupSuccessfulSinglePartDownload(1024); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + await command.ExecuteAsync(CancellationToken.None); + + // Assert + // Verify the command executed successfully with custom buffer size + _mockS3Client.Verify(c => c.GetObjectAsync( + It.IsAny<GetObjectRequest>(), + It.IsAny<CancellationToken>()), Times.Once); + } + + #endregion + + #region ExecuteAsync Tests - Single Part Download + + [TestMethod] + public async Task ExecuteAsync_SinglePartDownload_CompletesSuccessfully() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "single-part.dat"); + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + + var fileSize = 1024; + SetupSuccessfulSinglePartDownload(fileSize); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.IsTrue(File.Exists(destinationPath)); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyFileSize(destinationPath, fileSize)); + } + + [TestMethod] + public async Task ExecuteAsync_SinglePartDownload_SetsContentLengthCorrectly() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + + var fileSize = 2048; + SetupSuccessfulSinglePartDownload(fileSize); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.AreEqual(fileSize, response.Headers.ContentLength); + } + + [TestMethod] + public async Task ExecuteAsync_SinglePartDownload_SetsContentRangeCorrectly() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + + var fileSize = 1024; + SetupSuccessfulSinglePartDownload(fileSize); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.AreEqual($"bytes 0-{fileSize - 1}/{fileSize}", response.ContentRange); + } + + #endregion + + #region ExecuteAsync Tests - Response Mapping + + [TestMethod] + public async Task ExecuteAsync_MapsETagCorrectly() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + + var expectedETag = "\"abc123def456\""; + SetupSuccessfulSinglePartDownload(1024, eTag: expectedETag); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.AreEqual(expectedETag, response.ETag); + } + + [TestMethod] + public async Task ExecuteAsync_MapsServerSideEncryptionMethodCorrectly() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + + SetupSuccessfulSinglePartDownload(1024, + serverSideEncryptionMethod: ServerSideEncryptionMethod.AES256);
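The CreateConfiguration tests above fix where each knob is sourced: part size from the request with an 8MB fallback, concurrency from TransferUtilityConfig, and buffer size from the service client's AmazonS3Config. A sketch of the mapping those tests imply (the method shape and the default constant are assumptions, not the SDK's actual internals):

using Amazon.S3;
using Amazon.S3.Transfer;

static class ConfigurationMappingSketch
{
    private const long DefaultPartSizeBytes = 8 * 1024 * 1024; // the 8MB default the tests cite

    // Hypothetical equivalent of what MultipartDownloadCommand derives its FileDownloadConfiguration from.
    public static (long PartSize, int Concurrency, int BufferSize) Map(
        long? requestPartSize, TransferUtilityConfig utilityConfig, AmazonS3Config clientConfig)
    {
        return (
            requestPartSize ?? DefaultPartSizeBytes,   // request-level override wins when set
            utilityConfig.ConcurrentServiceRequests,   // transfer-utility-level knob
            clientConfig.BufferSize);                  // service-client-level knob
    }
}

+ var command = new MultipartDownloadCommand(_mockS3Client.Object,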
request, _config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.AreEqual(ServerSideEncryptionMethod.AES256, response.ServerSideEncryptionMethod); + } + + [TestMethod] + public async Task ExecuteAsync_MapsServerSideEncryptionKeyManagementServiceKeyId() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + + var kmsKeyId = "arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012"; + SetupSuccessfulSinglePartDownload(1024, + serverSideEncryptionKeyManagementServiceKeyId: kmsKeyId); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.AreEqual(kmsKeyId, response.ServerSideEncryptionKeyManagementServiceKeyId); + } + + [TestMethod] + public async Task ExecuteAsync_MapsServerSideEncryptionCustomerMethod() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + + SetupSuccessfulSinglePartDownload(1024, + serverSideEncryptionCustomerMethod: ServerSideEncryptionCustomerMethod.AES256); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.AreEqual(ServerSideEncryptionCustomerMethod.AES256, + response.ServerSideEncryptionCustomerMethod); + } + + [TestMethod] + public async Task ExecuteAsync_MapsMetadataCorrectly() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + + var metadata = new MetadataCollection(); + metadata["x-amz-meta-custom"] = "custom-value"; + SetupSuccessfulSinglePartDownload(1024, metadata: metadata); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response.Metadata); + Assert.IsTrue(response.Metadata.Count > 0); + } + + #endregion + + #region ExecuteAsync Tests - Composite Checksum Handling + + [TestMethod] + public async Task ExecuteAsync_CompositeChecksum_SetsAllChecksumsToNull() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + + SetupSuccessfulSinglePartDownload(1024, + checksumType: ChecksumType.COMPOSITE, + checksumCRC32: "somecrc32", + checksumSHA256: "somesha256"); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.AreEqual(ChecksumType.COMPOSITE, response.ChecksumType); + Assert.IsNull(response.ChecksumCRC32); + Assert.IsNull(response.ChecksumCRC32C); + Assert.IsNull(response.ChecksumCRC64NVME); + Assert.IsNull(response.ChecksumSHA1); + Assert.IsNull(response.ChecksumSHA256); + } + + [TestMethod] + public async Task ExecuteAsync_NonCompositeChecksum_PreservesChecksums() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: 
destinationPath); + + var expectedCRC32 = "somecrc32value"; + SetupSuccessfulSinglePartDownload(1024, + checksumType: null, // Not composite + checksumCRC32: expectedCRC32); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.AreEqual(expectedCRC32, response.ChecksumCRC32); + } + + [TestMethod] + public async Task ExecuteAsync_NullChecksumType_DoesNotThrow() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + + SetupSuccessfulSinglePartDownload(1024, checksumType: null); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act & Assert - Should not throw + var response = await command.ExecuteAsync(CancellationToken.None); + Assert.IsNotNull(response); + } + + #endregion + + #region ExecuteAsync Tests - Error Handling + + [TestMethod] + [ExpectedException(typeof(AmazonS3Exception))] + public async Task ExecuteAsync_S3ClientThrows_PropagatesException() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + + _mockS3Client.Setup(c => c.GetObjectAsync( + It.IsAny<GetObjectRequest>(), + It.IsAny<CancellationToken>())) + .ThrowsAsync(new AmazonS3Exception("S3 error")); + + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + await command.ExecuteAsync(CancellationToken.None); + } + + [TestMethod] + public async Task ExecuteAsync_Exception_CleansUpTempFiles() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + + _mockS3Client.Setup(c => c.GetObjectAsync( + It.IsAny<GetObjectRequest>(), + It.IsAny<CancellationToken>())) + .ThrowsAsync(new Exception("Download failed")); + + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + try + { + await command.ExecuteAsync(CancellationToken.None); + } + catch + { + // Expected exception + } + + // Assert - No temp files should remain + await Task.Delay(100); // Give cleanup time to complete + var tempFiles = Directory.GetFiles(_testDirectory, "*.s3tmp.*"); + Assert.AreEqual(0, tempFiles.Length); + } + + #endregion + + #region ExecuteAsync Tests - Cancellation + + [TestMethod] + [ExpectedException(typeof(OperationCanceledException))] + public async Task ExecuteAsync_WithCancelledToken_ThrowsOperationCanceledException() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + + var cts = new CancellationTokenSource(); + cts.Cancel(); + + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + await command.ExecuteAsync(cts.Token); + } + + [TestMethod] + public async Task ExecuteAsync_CancellationDuringDownload_CleansUpProperly() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "test.dat"); + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + + var cts = new CancellationTokenSource(); + + // Setup mock to cancel after being called + _mockS3Client.Setup(c => c.GetObjectAsync( + It.IsAny<GetObjectRequest>(), + It.IsAny<CancellationToken>())) + .Callback(() => cts.Cancel()) + .ThrowsAsync(new 
OperationCanceledException()); + + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + try + { + await command.ExecuteAsync(cts.Token); + } + catch (OperationCanceledException) + { + // Expected + } + + // Assert - Temp files should be cleaned up + await Task.Delay(100); // Give cleanup time to complete + var tempFiles = Directory.GetFiles(_testDirectory, "*.s3tmp.*"); + Assert.AreEqual(0, tempFiles.Length); + } + + #endregion + + #region Integration Tests + + [TestMethod] + public async Task Integration_SmallFileDownload_CompletesSuccessfully() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "small-file.dat"); + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + + var fileSize = 512; // Small file + SetupSuccessfulSinglePartDownload(fileSize); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.IsTrue(File.Exists(destinationPath)); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyFileSize(destinationPath, fileSize)); + Assert.AreEqual(fileSize, response.Headers.ContentLength); + Assert.AreEqual($"bytes 0-{fileSize - 1}/{fileSize}", response.ContentRange); + + // Verify no temp files remain + var tempFiles = Directory.GetFiles(_testDirectory, "*.s3tmp.*"); + Assert.AreEqual(0, tempFiles.Length); + } + + [TestMethod] + public async Task Integration_LargeFileDownload_CompletesSuccessfully() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "large-file.dat"); + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + + var fileSize = 1024 * 1024; // 1MB file + SetupSuccessfulSinglePartDownload(fileSize); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.IsTrue(File.Exists(destinationPath)); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyFileSize(destinationPath, fileSize)); + Assert.AreEqual(fileSize, response.Headers.ContentLength); + } + + [TestMethod] + public async Task Integration_ZeroByteFile_HandlesCorrectly() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "empty-file.dat"); + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + + SetupSuccessfulSinglePartDownload(0); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.IsTrue(File.Exists(destinationPath)); + Assert.AreEqual(0, new FileInfo(destinationPath).Length); + } + + [TestMethod] + public async Task Integration_OverwriteExistingFile_SucceedsAndOverwrites() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "overwrite-test.dat"); + + // Create existing file with different content + var oldData = MultipartDownloadTestHelpers.GenerateTestData(512, 999); + File.WriteAllBytes(destinationPath, oldData); + + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + + var newFileSize = 1024; + SetupSuccessfulSinglePartDownload(newFileSize); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + var 
response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.IsTrue(File.Exists(destinationPath)); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyFileSize(destinationPath, newFileSize)); + + // Verify content was overwritten (not same as oldData) + var newData = File.ReadAllBytes(destinationPath); + Assert.AreNotEqual(oldData.Length, newData.Length); + } + + [TestMethod] + public async Task Integration_NestedDirectory_CreatesDirectoryAndDownloads() + { + // Arrange + var nestedPath = Path.Combine(_testDirectory, "level1", "level2", "level3", "nested-file.dat"); + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: nestedPath); + + var fileSize = 2048; + SetupSuccessfulSinglePartDownload(fileSize); + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.IsTrue(File.Exists(nestedPath)); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyFileSize(nestedPath, fileSize)); + } + + #endregion + + #region Shared HTTP Throttler Tests + + [TestMethod] + public async Task ExecuteAsync_WithSharedHttpThrottler_CompletesSuccessfully() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "throttled-download.dat"); + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + + var fileSize = 1024; + SetupSuccessfulSinglePartDownload(fileSize); + + var sharedThrottler = new SemaphoreSlim(10); + try + { + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config, sharedThrottler); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.IsTrue(File.Exists(destinationPath)); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyFileSize(destinationPath, fileSize)); + } + finally + { + sharedThrottler.Dispose(); + } + } + + [TestMethod] + public async Task ExecuteAsync_WithoutSharedHttpThrottler_CompletesSuccessfully() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "no-throttler-download.dat"); + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + + var fileSize = 1024; + SetupSuccessfulSinglePartDownload(fileSize); + + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config, sharedHttpThrottler: null); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.IsTrue(File.Exists(destinationPath)); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyFileSize(destinationPath, fileSize)); + } + + [TestMethod] + public async Task ExecuteAsync_SharedHttpThrottler_DoesNotBlockSinglePartDownload() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "single-part-throttled.dat"); + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + + var fileSize = 512; // Small file (single part) + SetupSuccessfulSinglePartDownload(fileSize); + + // Create throttler with limited capacity + var sharedThrottler = new SemaphoreSlim(1); + try + { + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config, sharedThrottler); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.IsTrue(File.Exists(destinationPath)); 
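
+ // Illustrative aside (not part of the original test): the assertion below leans on
+ // SemaphoreSlim.CurrentCount, which reports how many slots are currently free, so a
+ // throttler whose every WaitAsync was matched by a Release is back at its initial count.
+ // A minimal, self-contained sketch of that accounting (demoThrottler is hypothetical):
+ var demoThrottler = new SemaphoreSlim(1);
+ await demoThrottler.WaitAsync(); // one slot held: CurrentCount drops to 0
+ demoThrottler.Release(); // slot returned: CurrentCount is 1 again
+ Assert.AreEqual(1, demoThrottler.CurrentCount);
+ demoThrottler.Dispose();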
+ + // Verify throttler was not exhausted (single part doesn't use it heavily) + Assert.AreEqual(1, sharedThrottler.CurrentCount); + } + finally + { + sharedThrottler.Dispose(); + } + } + + [TestMethod] + public async Task ExecuteAsync_SharedHttpThrottler_ReleasedOnSuccess() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "throttler-released.dat"); + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + + var fileSize = 1024; + SetupSuccessfulSinglePartDownload(fileSize); + + var sharedThrottler = new SemaphoreSlim(5); + var initialCount = sharedThrottler.CurrentCount; + + try + { + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config, sharedThrottler); + + // Act + await command.ExecuteAsync(CancellationToken.None); + + // Assert - throttler should be back to initial state + Assert.AreEqual(initialCount, sharedThrottler.CurrentCount); + } + finally + { + sharedThrottler.Dispose(); + } + } + + [TestMethod] + public async Task ExecuteAsync_SharedHttpThrottler_ReleasedOnException() + { + // Arrange + var destinationPath = Path.Combine(_testDirectory, "throttler-released-error.dat"); + var request = MultipartDownloadTestHelpers.CreateDownloadRequest( + filePath: destinationPath); + + _mockS3Client.Setup(c => c.GetObjectAsync( + It.IsAny<GetObjectRequest>(), + It.IsAny<CancellationToken>())) + .ThrowsAsync(new AmazonS3Exception("Test exception")); + + var sharedThrottler = new SemaphoreSlim(5); + var initialCount = sharedThrottler.CurrentCount; + + try + { + var command = new MultipartDownloadCommand(_mockS3Client.Object, request, _config, sharedThrottler); + + // Act & Assert + await Assert.ThrowsExceptionAsync<AmazonS3Exception>( + async () => await command.ExecuteAsync(CancellationToken.None)); + + // Throttler should be back to initial state even after exception + Assert.AreEqual(initialCount, sharedThrottler.CurrentCount); + } + finally + { + sharedThrottler.Dispose(); + } + } + + #endregion + + #region Helper Methods + + private void SetupSuccessfulSinglePartDownload( + long fileSize, + string eTag = null, + ServerSideEncryptionMethod serverSideEncryptionMethod = null, + string serverSideEncryptionKeyManagementServiceKeyId = null, + ServerSideEncryptionCustomerMethod serverSideEncryptionCustomerMethod = null, + MetadataCollection metadata = null, + ChecksumType checksumType = null, + string checksumCRC32 = null, + string checksumSHA256 = null) + { + var data = MultipartDownloadTestHelpers.GenerateTestData((int)fileSize, 0); + + var response = new GetObjectResponse + { + ContentLength = fileSize, + ResponseStream = new MemoryStream(data), + // Real S3 behavior: ContentRange is NOT included for simple GET requests + // (single-part downloads without Range headers). + // ContentRange IS included when Range headers are used, even for single-part downloads. + // This mock simulates a simple GET without Range headers. + ContentRange = null, + ETag = eTag ?? 
"\"default-etag\"", + ServerSideEncryptionMethod = serverSideEncryptionMethod, + ServerSideEncryptionKeyManagementServiceKeyId = serverSideEncryptionKeyManagementServiceKeyId, + ServerSideEncryptionCustomerMethod = serverSideEncryptionCustomerMethod, + ChecksumType = checksumType, + ChecksumCRC32 = checksumCRC32, + ChecksumSHA256 = checksumSHA256 + }; + + // Add metadata items if provided (Metadata property is read-only) + if (metadata != null) + { + foreach (var key in metadata.Keys) + { + response.Metadata[key] = metadata[key]; + } + } + + _mockS3Client.Setup(c => c.GetObjectAsync( + It.IsAny(), + It.IsAny())) + .ReturnsAsync(response); + } + + #endregion + } +} diff --git a/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs b/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs new file mode 100644 index 000000000000..ed047675bcb3 --- /dev/null +++ b/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadManagerTests.cs @@ -0,0 +1,4179 @@ +using Amazon.S3; +using Amazon.S3.Model; +using Amazon.S3.Transfer; +using Amazon.S3.Transfer.Internal; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Moq; +using System; +using System.Buffers; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; + +namespace AWSSDK.UnitTests +{ + [TestClass] + public class MultipartDownloadManagerTests + { + private Mock CreateMockDataHandler() + { + var mockHandler = new Mock(); + mockHandler.Setup(x => x.ProcessPartAsync(It.IsAny(), It.IsAny(), It.IsAny())) + .Returns(async (partNumber, response, ct) => + { + // Simulate reading the stream and firing progress events + // This mimics the real S3 SDK behavior where WriteObjectProgressEvent is fired as data is read + if (response?.ResponseStream != null) + { + var buffer = new byte[8192]; + long totalBytesRead = 0; + long accumulatedBytes = 0; // Accumulate bytes until threshold is reached + int bytesRead; + + // DefaultProgressUpdateInterval is 102400 bytes (100KB) + const long progressThreshold = 102400; + + while ((bytesRead = response.ResponseStream.Read(buffer, 0, buffer.Length)) > 0) + { + totalBytesRead += bytesRead; + accumulatedBytes += bytesRead; + + // Fire progress event when accumulated bytes exceed threshold + // This matches real S3 SDK behavior which throttles progress events + if (accumulatedBytes >= progressThreshold) + { + response.OnRaiseProgressEvent( + null, // filePath + accumulatedBytes, // incrementTransferred + totalBytesRead, // transferred + response.ContentLength, // total + false); // completed + accumulatedBytes = 0; // Reset accumulator after firing event + } + } + + // Fire final event with any remaining bytes + if (accumulatedBytes > 0) + { + response.OnRaiseProgressEvent( + null, // filePath + accumulatedBytes, // incrementTransferred + totalBytesRead, // transferred + response.ContentLength, // total + false); // completed + } + } + + // Give background events time to fire before response is disposed + // OnRaiseProgressEvent uses AWSSDKUtils.InvokeInBackground which queues work on ThreadPool + // Use Thread.Sleep to block and force ThreadPool to execute queued work + Thread.Sleep(500); + + // Additional yield to ensure all queued work completes + await Task.Yield(); + }); + mockHandler.Setup(x => x.WaitForCapacityAsync(It.IsAny())) + .Returns(Task.CompletedTask); + mockHandler.Setup(x => x.ReleaseCapacity()); + mockHandler.Setup(x => x.OnDownloadComplete(It.IsAny())); + return mockHandler; + } + + /// + /// 
Helper method to wait for async progress events to complete. + /// Polls until expected bytes are transferred or timeout occurs. + /// + private async Task WaitForProgressEventsAsync( + List progressEvents, + object progressLock, + long expectedBytes, + int timeoutMs = 5000) + { + var startTime = DateTime.UtcNow; + + while ((DateTime.UtcNow - startTime).TotalMilliseconds < timeoutMs) + { + lock (progressLock) + { + if (progressEvents.Count > 0) + { + var lastEvent = progressEvents.Last(); + if (lastEvent.TransferredBytes >= expectedBytes) + { + return true; + } + } + } + + // Small delay between checks + await Task.Delay(10); + } + + return false; + } + + #region Constructor Tests + + [TestMethod] + public void Constructor_WithValidParameters_CreatesCoordinator() + { + // Arrange + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var mockDataHandler = CreateMockDataHandler(); + + // Act + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object); + + // Assert + Assert.IsNotNull(coordinator); + } + + [DataTestMethod] + [DataRow(true, false, false, false, DisplayName = "Null S3Client")] + [DataRow(false, true, false, false, DisplayName = "Null Request")] + [DataRow(false, false, true, false, DisplayName = "Null Config")] + [ExpectedException(typeof(ArgumentNullException))] + public void Constructor_WithNullParameter_ThrowsArgumentNullException( + bool nullClient, bool nullRequest, bool nullConfig, bool nullHandler) + { + // Arrange + var client = nullClient ? null : MultipartDownloadTestHelpers.CreateMockS3Client().Object; + var request = nullRequest ? null : MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = nullConfig ? null : MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var handler = nullHandler ? null : CreateMockDataHandler().Object; + + // Act + var coordinator = new MultipartDownloadManager(client, request, config, handler); + } + + [TestMethod] + [ExpectedException(typeof(NotSupportedException))] + public void Constructor_WithEncryptionClient_ThrowsNotSupportedException() + { + // Arrange + var mockEncryptionClient = new Mock(); + mockEncryptionClient.As(); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var mockDataHandler = CreateMockDataHandler(); + + // Act + var coordinator = new MultipartDownloadManager(mockEncryptionClient.Object, request, config, mockDataHandler.Object); + } + + [TestMethod] + public void Constructor_WithEncryptionClient_ExceptionMessageIsDescriptive() + { + // Arrange + var mockEncryptionClient = new Mock(); + mockEncryptionClient.As(); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var mockDataHandler = CreateMockDataHandler(); + + // Act & Assert + try + { + var coordinator = new MultipartDownloadManager(mockEncryptionClient.Object, request, config, mockDataHandler.Object); + Assert.Fail("Expected NotSupportedException was not thrown"); + } + catch (NotSupportedException ex) + { + Assert.IsTrue(ex.Message.Contains("Multipart download is not supported when using Amazon.S3.Internal.IAmazonS3Encryption client. 
Please use the Amazon.S3.AmazonS3Client for multipart download.")); + } + } + + #endregion + + #region Discovery - PART Strategy - Single Part Tests + + [TestMethod] + public async Task DiscoverUsingPartStrategy_WithNullPartsCount_ReturnsSinglePart() + { + // Arrange + var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse( + objectSize: 1024 * 1024, + eTag: "single-part-etag"); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: MultipartDownloadType.PART); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + // Act + var result = await coordinator.StartDownloadAsync(null, CancellationToken.None); + + // Assert + Assert.IsNotNull(result); + Assert.AreEqual(1, result.TotalParts); + Assert.AreEqual(1024 * 1024, result.ObjectSize); + Assert.IsNotNull(result.InitialResponse); + } + + [TestMethod] + public async Task DiscoverUsingPartStrategy_WithPartsCountOne_ReturnsSinglePart() + { + // Arrange + var mockResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse( + contentLength: 1024 * 1024, + partsCount: 1, + contentRange: null, + eTag: "single-part-etag"); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: MultipartDownloadType.PART); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + // Act + var result = await coordinator.StartDownloadAsync(null, CancellationToken.None); + + // Assert + Assert.AreEqual(1, result.TotalParts); + Assert.IsNotNull(result.InitialResponse); + } + + [TestMethod] + public async Task DiscoverUsingPartStrategy_SinglePart_DoesNotBufferFirstPart() + { + // Arrange + var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse(objectSize: 1024); + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: MultipartDownloadType.PART); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + // Act + var result = await coordinator.StartDownloadAsync(null, CancellationToken.None); + + // Assert - Single-part does not buffer during discovery + Assert.IsNotNull(result.InitialResponse); + } + + #endregion + + #region Discovery - PART Strategy - Multipart Tests + + [TestMethod] + public async Task DiscoverUsingPartStrategy_WithMultipleParts_ReturnsMultipart() + { + // Arrange + var totalObjectSize = 50 * 1024 * 1024; // 50MB + var partSize = 10 * 1024 * 1024; // 10MB + var totalParts = 5; + + var mockResponse = MultipartDownloadTestHelpers.CreateMultipartFirstPartResponse( + partSize, totalParts, totalObjectSize, "multipart-etag"); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: 
MultipartDownloadType.PART); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + // Act + var result = await coordinator.StartDownloadAsync(null, CancellationToken.None); + + // Assert + Assert.AreEqual(5, result.TotalParts); + Assert.AreEqual(totalObjectSize, result.ObjectSize); + Assert.IsNotNull(result.InitialResponse); + } + + [TestMethod] + public async Task DiscoverUsingPartStrategy_Multipart_BuffersFirstPart() + { + // Arrange + var totalObjectSize = 50 * 1024 * 1024; + var partSize = 10 * 1024 * 1024; + var totalParts = 5; + + var mockResponse = MultipartDownloadTestHelpers.CreateMultipartFirstPartResponse( + partSize, totalParts, totalObjectSize, "multipart-etag"); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: MultipartDownloadType.PART); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + // Act + var result = await coordinator.StartDownloadAsync(null, CancellationToken.None); + + // Assert - Multipart returns response with stream for buffering in StartDownloadsAsync + Assert.IsNotNull(result.InitialResponse); + } + + [TestMethod] + public async Task DiscoverUsingPartStrategy_SavesETag() + { + // Arrange + var mockResponse = MultipartDownloadTestHelpers.CreateMultipartFirstPartResponse( + 8 * 1024 * 1024, 5, 40 * 1024 * 1024, "saved-etag"); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: MultipartDownloadType.PART); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + // Act + var result = await coordinator.StartDownloadAsync(null, CancellationToken.None); + + // Assert - ETag is saved internally (verified through subsequent validation) + Assert.IsNotNull(result); + } + + [TestMethod] + public async Task DiscoverUsingPartStrategy_ParsesContentRange() + { + // Arrange + var totalObjectSize = 52428800; // 50MB + var partSize = 8388608; // 8MB + var contentRange = $"bytes 0-{partSize - 1}/{totalObjectSize}"; + + var mockResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse( + contentLength: partSize, + partsCount: 7, + contentRange: contentRange, + eTag: "test-etag"); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: MultipartDownloadType.PART); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + // Act + var result = await coordinator.StartDownloadAsync(null, CancellationToken.None); + + // Assert + Assert.AreEqual(totalObjectSize, result.ObjectSize); + } + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public async Task 
DiscoverUsingPartStrategy_WithInvalidContentRange_ThrowsException() + { + // Arrange + var mockResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse( + contentLength: 8 * 1024 * 1024, + partsCount: 5, + contentRange: "invalid-format", + eTag: "test-etag"); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: MultipartDownloadType.PART); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + // Act + await coordinator.StartDownloadAsync(null, CancellationToken.None); + } + + #endregion + + #region Discovery - RANGE Strategy - Small Object Tests + + [TestMethod] + public async Task DiscoverUsingRangeStrategy_SmallObject_ReturnsSinglePart() + { + // Arrange + var objectSize = 1024 * 1024; // 1MB + + var mockResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse( + contentLength: objectSize, + partsCount: null, + contentRange: null, // No ContentRange means entire small object + eTag: "small-object-etag"); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: MultipartDownloadType.RANGE); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + // Act + var result = await coordinator.StartDownloadAsync(null, CancellationToken.None); + + // Assert + Assert.AreEqual(1, result.TotalParts); + Assert.AreEqual(objectSize, result.ObjectSize); + Assert.IsNotNull(result.InitialResponse); + } + + #endregion + + #region Discovery - RANGE Strategy - Single Part from Range Tests + + [TestMethod] + public async Task DiscoverUsingRangeStrategy_SinglePartRange_ReturnsSinglePart() + { + // Arrange + var objectSize = 5 * 1024 * 1024; // 5MB + var contentRange = $"bytes 0-{objectSize - 1}/{objectSize}"; + + var mockResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse( + contentLength: objectSize, + partsCount: null, + contentRange: contentRange, + eTag: "single-range-etag"); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: MultipartDownloadType.RANGE); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + // Act + var result = await coordinator.StartDownloadAsync(null, CancellationToken.None); + + // Assert + Assert.AreEqual(1, result.TotalParts); + Assert.IsNotNull(result.InitialResponse); + } + + #endregion + + #region Discovery - RANGE Strategy - Multipart Tests + + [TestMethod] + public async Task DiscoverUsingRangeStrategy_Multipart_ReturnsMultipart() + { + // Arrange + var totalObjectSize = 52428800; // 50MB + var partSize = 8388608; // 8MB + var contentRange = $"bytes 0-{partSize - 1}/{totalObjectSize}"; + + var mockResponse = MultipartDownloadTestHelpers.CreateRangeResponse( + 0, partSize - 1, totalObjectSize, "range-etag"); + + var mockClient = 
MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + partSize: partSize, + downloadType: MultipartDownloadType.RANGE); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + // Act + var result = await coordinator.StartDownloadAsync(null, CancellationToken.None); + + // Assert + Assert.AreEqual(7, result.TotalParts); // 52428800 / 8388608 = 6.25 -> 7 parts + Assert.IsNotNull(result.InitialResponse); + } + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public async Task DiscoverUsingRangeStrategy_Multipart_ValidatesContentLength() + { + // Arrange + var totalObjectSize = 50 * 1024 * 1024; + var partSize = 8 * 1024 * 1024; + var wrongPartSize = 5 * 1024 * 1024; // ContentLength doesn't match requested part size + + var mockResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse( + contentLength: wrongPartSize, + partsCount: null, + contentRange: $"bytes 0-{wrongPartSize - 1}/{totalObjectSize}", + eTag: "range-etag"); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + partSize: partSize, + downloadType: MultipartDownloadType.RANGE); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + // Act + await coordinator.StartDownloadAsync(null, CancellationToken.None); + } + + [TestMethod] + public async Task DiscoverUsingRangeStrategy_SavesETag() + { + // Arrange + var mockResponse = MultipartDownloadTestHelpers.CreateRangeResponse( + 0, 8 * 1024 * 1024 - 1, 50 * 1024 * 1024, "saved-range-etag"); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + partSize: 8 * 1024 * 1024, + downloadType: MultipartDownloadType.RANGE); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + // Act + var result = await coordinator.StartDownloadAsync(null, CancellationToken.None); + + // Assert - ETag is saved internally + Assert.IsNotNull(result); + } + + [TestMethod] + public async Task DiscoverUsingRangeStrategy_CalculatesPartCount() + { + // Arrange + var totalObjectSize = 52428800; // 50MB + var partSize = 8388608; // 8MB + + var mockResponse = MultipartDownloadTestHelpers.CreateRangeResponse( + 0, partSize - 1, totalObjectSize, "range-etag"); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + partSize: partSize, + downloadType: MultipartDownloadType.RANGE); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + // Act + var result = await coordinator.StartDownloadAsync(null, CancellationToken.None); + + // Assert + Assert.AreEqual(7, result.TotalParts); // 
Ceiling(52428800 / 8388608) = 7 + } + + #endregion + + #region StartDownloadsAsync Tests - Setup + + [TestMethod] + public async Task StartDownloadsAsync_SinglePart_ReturnsImmediately() + { + // Arrange + var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse(1024); + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + // Act - Call DiscoverDownloadStrategyAsync first to properly acquire HTTP semaphore + + await coordinator.StartDownloadAsync(null, CancellationToken.None); + + // Assert - should complete without any additional downloads (discovery already made the call) + mockClient.Verify(x => x.GetObjectAsync(It.IsAny(), It.IsAny()), Times.Once); + } + + [TestMethod] + public async Task StartDownloadsAsync_SinglePart_ProcessesPartSynchronously() + { + // Arrange + var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse(1024); + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + // Act + var result = await coordinator.StartDownloadAsync(null, CancellationToken.None); + + // Assert - should return discovery result immediately for single-part downloads + Assert.IsNotNull(result); + Assert.AreEqual(1, result.TotalParts); + } + + #endregion + + #region Validation Tests + + [DataTestMethod] + [DataRow(MultipartDownloadTestHelpers.ValidationFailureType.MissingContentRange, DisplayName = "Missing ContentRange")] + [DataRow(MultipartDownloadTestHelpers.ValidationFailureType.InvalidContentRangeFormat, DisplayName = "Invalid ContentRange Format")] + [DataRow(MultipartDownloadTestHelpers.ValidationFailureType.UnparseableRange, DisplayName = "Unparseable Range")] + [DataRow(MultipartDownloadTestHelpers.ValidationFailureType.RangeMismatch, DisplayName = "Range Mismatch")] + [DataRow(MultipartDownloadTestHelpers.ValidationFailureType.ETagMismatch, DisplayName = "ETag Mismatch")] + [ExpectedException(typeof(InvalidOperationException))] + public async Task Validation_Failures_ThrowInvalidOperationException( + MultipartDownloadTestHelpers.ValidationFailureType failureType) + { + // Arrange + var mockClient = MultipartDownloadTestHelpers.CreateMockClientWithValidationFailure(failureType); + var coordinator = MultipartDownloadTestHelpers.CreateCoordinatorForValidationTest(mockClient.Object, failureType); + + + // Act & Assert (exception expected via attribute) + await coordinator.StartDownloadAsync(null, CancellationToken.None); + await coordinator.DownloadCompletionTask; // Wait for background task to observe exceptions + } + + [TestMethod] + public async Task Validation_ETag_Matching_Succeeds() + { + // Arrange - All parts have consistent ETag + var totalParts = 2; + var partSize = 8 * 1024 * 1024; + var totalObjectSize = totalParts * partSize; + var consistentETag = "consistent-etag"; + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( + totalParts, partSize, totalObjectSize, 
consistentETag, usePartStrategy: true); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: MultipartDownloadType.PART); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + + + // Act - should succeed with matching ETags + await coordinator.StartDownloadAsync(null, CancellationToken.None); + + // Assert - no exception thrown + } + + [TestMethod] + public async Task Validation_ContentRange_ValidRange_Succeeds() + { + // Arrange - RANGE strategy with correct ContentRange + var totalObjectSize = 20 * 1024 * 1024; + var partSize = 8 * 1024 * 1024; + + // All three parts have correct ranges + var firstPartResponse = MultipartDownloadTestHelpers.CreateRangeResponse( + 0, partSize - 1, totalObjectSize, "test-etag"); + + var secondPartResponse = MultipartDownloadTestHelpers.CreateRangeResponse( + partSize, 2 * partSize - 1, totalObjectSize, "test-etag"); + + var thirdPartResponse = MultipartDownloadTestHelpers.CreateRangeResponse( + 2 * partSize, totalObjectSize - 1, totalObjectSize, "test-etag"); + + int callCount = 0; + var mockClient = new Mock(); + mockClient.Setup(x => x.GetObjectAsync(It.IsAny(), It.IsAny())) + .Returns(() => + { + callCount++; + if (callCount == 1) return Task.FromResult(firstPartResponse); + if (callCount == 2) return Task.FromResult(secondPartResponse); + return Task.FromResult(thirdPartResponse); + }); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + partSize: partSize, + downloadType: MultipartDownloadType.RANGE); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + + + // Act - should succeed with valid ranges + await coordinator.StartDownloadAsync(null, CancellationToken.None); + + // Assert - no exception thrown + } + + #endregion + + #region Sequential Capacity Acquisition Tests + + [TestMethod] + public async Task StartDownloadsAsync_MultipartDownload_AcquiresCapacitySequentially() + { + // Arrange - Test that capacity is acquired in sequential order (Part 1 discovery, then Part 2, 3, 4 background) + var totalParts = 4; + var partSize = 8 * 1024 * 1024; + var totalObjectSize = totalParts * partSize; + + var capacityAcquisitionOrder = new List(); + var capacityAcquisitionLock = new object(); + + var mockDataHandler = new Mock(); + + // Track capacity acquisition order - now includes Part 1 discovery + var callCount = 0; + mockDataHandler + .Setup(x => x.WaitForCapacityAsync(It.IsAny())) + .Returns(() => + { + lock (capacityAcquisitionLock) + { + callCount++; + if (callCount == 1) + { + // First call is Part 1 discovery + capacityAcquisitionOrder.Add(1); + } + else + { + // Subsequent calls are background parts 2, 3, 4 + capacityAcquisitionOrder.Add(callCount); + } + } + return Task.CompletedTask; + }); + + mockDataHandler + .Setup(x => x.ProcessPartAsync(It.IsAny(), It.IsAny(), It.IsAny())) + .Returns(Task.CompletedTask); + + mockDataHandler + .Setup(x => x.OnDownloadComplete(It.IsAny())); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( + totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: MultipartDownloadType.PART); + 
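
+ // Illustrative aside (not part of the original test): the ordering asserted further down
+ // is the classic "await one capacity slot per part, in part order, then create that
+ // part's task" loop. A self-contained sketch of why that yields a deterministic
+ // sequence (demoSlots and demoOrder are hypothetical locals):
+ var demoSlots = new SemaphoreSlim(2); // pretend buffer with two in-memory part slots
+ var demoOrder = new List<int>();
+ for (int part = 2; part <= totalParts; part++)
+ {
+     await demoSlots.WaitAsync(); // block until a slot frees before scheduling the part
+     demoOrder.Add(part); // acquisition is therefore strictly 2, 3, 4
+     demoSlots.Release(); // the real handler releases once the part is drained
+ }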
var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 2); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object); + + + + // Act + await coordinator.StartDownloadAsync(null, CancellationToken.None); + + // Wait for background task completion + await coordinator.DownloadCompletionTask; + + // Assert - Capacity should be acquired in order: Part 1 (discovery), then Parts 2, 3, 4 (background) + lock (capacityAcquisitionLock) + { + Assert.AreEqual(4, capacityAcquisitionOrder.Count, "Should acquire capacity for parts 1 (discovery), 2, 3, 4 (background)"); + Assert.AreEqual(1, capacityAcquisitionOrder[0], "First capacity acquisition should be for Part 1 discovery"); + Assert.AreEqual(2, capacityAcquisitionOrder[1], "Second capacity acquisition should be for Part 2 background"); + Assert.AreEqual(3, capacityAcquisitionOrder[2], "Third capacity acquisition should be for Part 3 background"); + Assert.AreEqual(4, capacityAcquisitionOrder[3], "Fourth capacity acquisition should be for Part 4 background"); + } + } + + [TestMethod] + public async Task StartDownloadsAsync_MultipartDownload_DoesNotCallWaitForCapacityInCreateDownloadTask() + { + // Arrange - Test that CreateDownloadTaskAsync no longer calls WaitForCapacityAsync (capacity is pre-acquired) + var totalParts = 3; + var partSize = 8 * 1024 * 1024; + var totalObjectSize = totalParts * partSize; + + var waitForCapacityCallCount = 0; + var processPartCallCount = 0; + + var mockDataHandler = new Mock(); + + // Track WaitForCapacityAsync calls - now includes Part 1 discovery + background parts 2-3 + mockDataHandler + .Setup(x => x.WaitForCapacityAsync(It.IsAny())) + .Returns(() => + { + Interlocked.Increment(ref waitForCapacityCallCount); + return Task.CompletedTask; + }); + + mockDataHandler + .Setup(x => x.ProcessPartAsync(It.IsAny(), It.IsAny(), It.IsAny())) + .Returns(() => + { + Interlocked.Increment(ref processPartCallCount); + return Task.CompletedTask; + }); + + mockDataHandler + .Setup(x => x.OnDownloadComplete(It.IsAny())); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( + totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: MultipartDownloadType.PART); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object); + + + + // Act + await coordinator.StartDownloadAsync(null, CancellationToken.None); + await coordinator.DownloadCompletionTask; + + // Assert + // WaitForCapacityAsync should be called for Part 1 discovery + background parts 2-3 (total 3 calls) + Assert.AreEqual(3, waitForCapacityCallCount, + "WaitForCapacityAsync should be called for Part 1 discovery + background parts 2-3 (3 times total)"); + + // ProcessPartAsync should be called for all parts (1, 2, 3) + Assert.AreEqual(3, processPartCallCount, + "ProcessPartAsync should be called for all parts (3 times for parts 1-3)"); + } + + [TestMethod] + public async Task StartDownloadsAsync_BackgroundTask_InterleavesCapacityAcquisitionWithTaskCreation() + { + // Arrange - Test that background task interleaves capacity acquisition with task creation + // This ensures sequential ordering: capacity(2) → task(2) → capacity(3) → task(3) + var totalParts = 3; + var partSize = 8 * 1024 * 1024; + var 
totalObjectSize = totalParts * partSize; + + // Track operation order with sequential counter + var operationOrder = new List<(string operation, int partNum, int sequence)>(); + var lockObject = new object(); + var operationCounter = 0; + + var mockDataHandler = new Mock(); + + mockDataHandler + .Setup(x => x.WaitForCapacityAsync(It.IsAny())) + .Returns(() => + { + lock (lockObject) + { + // Capacity is now acquired for Parts 1, 2, 3 (Part 1 during discovery) + var partNum = operationOrder.Count(o => o.operation == "capacity") + 1; + operationOrder.Add(("capacity", partNum, operationCounter++)); + } + return Task.CompletedTask; + }); + + mockDataHandler + .Setup(x => x.ProcessPartAsync(It.IsAny(), It.IsAny(), It.IsAny())) + .Returns((partNum, response, ct) => + { + lock (lockObject) + { + operationOrder.Add(("task", partNum, operationCounter++)); + } + return Task.CompletedTask; + }); + + mockDataHandler + .Setup(x => x.OnDownloadComplete(It.IsAny())); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( + totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: MultipartDownloadType.PART); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object); + + + + // Act + await coordinator.StartDownloadAsync(null, CancellationToken.None); + await coordinator.DownloadCompletionTask; + + // Assert + lock (lockObject) + { + var capacityOps = operationOrder.Where(o => o.operation == "capacity").ToList(); + var taskOps = operationOrder.Where(o => o.operation == "task").ToList(); + + Assert.AreEqual(3, capacityOps.Count, "Should acquire capacity for parts 1 (discovery), 2, 3 (background)"); + Assert.AreEqual(3, taskOps.Count, "Should create tasks for parts 1-3"); + + // Verify Part 1: capacity → task (during discovery) + var part1Capacity = capacityOps.FirstOrDefault(o => o.partNum == 1); + var part1Task = taskOps.FirstOrDefault(o => o.partNum == 1); + Assert.IsNotNull(part1Capacity, "Part 1 capacity should be acquired during discovery"); + Assert.IsNotNull(part1Task, "Part 1 should be processed"); + Assert.IsTrue(part1Capacity.sequence < part1Task.sequence, + "Part 1 capacity should be acquired before Part 1 task"); + + // Verify interleaved pattern for background parts (2, 3) + // For each background part: capacity(N) → task(N) → capacity(N+1) → task(N+1) + for (int partNum = 2; partNum <= totalParts; partNum++) + { + var capacity = capacityOps.FirstOrDefault(o => o.partNum == partNum); + var task = taskOps.FirstOrDefault(o => o.partNum == partNum); + + Assert.IsNotNull(capacity, $"Part {partNum} capacity should be acquired"); + Assert.IsNotNull(task, $"Part {partNum} task should be created"); + + // Verify capacity comes before task for this part + Assert.IsTrue(capacity.sequence < task.sequence, + $"Part {partNum} capacity (seq={capacity.sequence}) should come before task (seq={task.sequence})"); + + // Verify interleaving: task(N) should come before capacity(N+1) + if (partNum < totalParts) + { + var nextCapacity = capacityOps.FirstOrDefault(o => o.partNum == partNum + 1); + Assert.IsNotNull(nextCapacity, $"Part {partNum + 1} capacity should exist"); + Assert.IsTrue(task.sequence < nextCapacity.sequence, + $"Part {partNum} task (seq={task.sequence}) should come before Part {partNum + 1} capacity 
(seq={nextCapacity.sequence})"); + } + } + + // Verify overall sequential pattern: capacity(1) → task(1) → capacity(2) → task(2) → capacity(3) → task(3) + var expectedPattern = new[] + { + ("capacity", 1), ("task", 1), + ("capacity", 2), ("task", 2), + ("capacity", 3), ("task", 3) + }; + + for (int i = 0; i < expectedPattern.Length; i++) + { + Assert.AreEqual(expectedPattern[i].Item1, operationOrder[i].operation, + $"Operation {i} should be {expectedPattern[i].Item1}"); + Assert.AreEqual(expectedPattern[i].Item2, operationOrder[i].partNum, + $"Operation {i} should be for part {expectedPattern[i].Item2}"); + } + } + } + + #endregion + + #region Race Condition Prevention Tests + + [TestMethod] + public async Task StartDownloadsAsync_PreventRaceConditionDeadlock_WithLimitedBuffer() + { + // Arrange - Test scenario that could deadlock with old approach: limited buffer + out-of-order completion + var totalParts = 5; + var partSize = 8 * 1024 * 1024; + var totalObjectSize = totalParts * partSize; + + // Simulate a scenario where buffer is limited and parts could complete out of order + var maxInMemoryParts = 2; // Very limited buffer + var capacitySlots = new SemaphoreSlim(maxInMemoryParts); + var partProcessingOrder = new List(); + var lockObject = new object(); + + var mockDataHandler = new Mock(); + + // Simulate capacity checking - old approach could deadlock here + mockDataHandler + .Setup(x => x.WaitForCapacityAsync(It.IsAny())) + .Returns(async () => + { + // Wait for capacity (this is where the old approach could deadlock) + await capacitySlots.WaitAsync(); + // Note: In real implementation, capacity would be released when part is processed + }); + + mockDataHandler + .Setup(x => x.ProcessPartAsync(It.IsAny(), It.IsAny(), It.IsAny())) + .Returns((partNum, response, ct) => + { + lock (lockObject) + { + partProcessingOrder.Add(partNum); + } + + // Release capacity after processing + capacitySlots.Release(); + return Task.CompletedTask; + }); + + mockDataHandler + .Setup(x => x.OnDownloadComplete(It.IsAny())); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( + totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: MultipartDownloadType.PART); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 3); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object); + + + + // Act - This should not deadlock with the new sequential approach + var startTime = DateTime.UtcNow; + await coordinator.StartDownloadAsync(null, CancellationToken.None); + await coordinator.DownloadCompletionTask; + var endTime = DateTime.UtcNow; + + // Assert + var executionTime = (endTime - startTime).TotalSeconds; + Assert.IsTrue(executionTime < 10, + $"Download should complete without deadlock. 
Took {executionTime:F2} seconds"); + + lock (lockObject) + { + Assert.AreEqual(totalParts, partProcessingOrder.Count, + "All parts should be processed successfully"); + + // Part 1 should be first (processed during StartDownloadsAsync) + Assert.AreEqual(1, partProcessingOrder[0], "Part 1 should be processed first"); + } + } + + [TestMethod] + public async Task StartDownloadsAsync_SequentialCapacityAcquisition_PreventsOutOfOrderBlocking() + { + // Arrange - Test that sequential acquisition prevents out-of-order parts from blocking expected parts + var totalParts = 4; + var partSize = 8 * 1024 * 1024; + var totalObjectSize = totalParts * partSize; + + var capacityOrder = new List(); + var processingOrder = new List(); + var lockObject = new object(); + + var mockDataHandler = new Mock(); + + var partCounter = 0; // Start with part 1 (Part 1 discovery now calls WaitForCapacityAsync) + mockDataHandler + .Setup(x => x.WaitForCapacityAsync(It.IsAny())) + .Returns(() => + { + lock (lockObject) + { + partCounter++; + capacityOrder.Add(partCounter); + } + return Task.CompletedTask; + }); + + mockDataHandler + .Setup(x => x.ProcessPartAsync(It.IsAny(), It.IsAny(), It.IsAny())) + .Returns((partNum, response, ct) => + { + lock (lockObject) + { + processingOrder.Add(partNum); + } + return Task.CompletedTask; + }); + + mockDataHandler + .Setup(x => x.OnDownloadComplete(It.IsAny())); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( + totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: MultipartDownloadType.PART); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 2); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object); + + + + // Act + await coordinator.StartDownloadAsync(null, CancellationToken.None); + await coordinator.DownloadCompletionTask; + + // Assert - Capacity acquisition should be in order, preventing blocking + lock (lockObject) + { + Assert.AreEqual(4, capacityOrder.Count, "Should acquire capacity for Part 1 discovery + parts 2, 3, 4 background"); + + // Verify sequential order: Part 1 (discovery), then Parts 2, 3, 4 (background) + for (int i = 0; i < capacityOrder.Count; i++) + { + Assert.AreEqual(i + 1, capacityOrder[i], + $"Capacity acquisition {i} should be for part {i + 1}"); + } + + Assert.AreEqual(totalParts, processingOrder.Count, "All parts should be processed"); + } + } + + #endregion + + #region Background Task Resource Management Tests + + [TestMethod] + public async Task StartDownloadsAsync_BackgroundTaskSuccess_DisposesCancellationTokenSource() + { + // Arrange - Test that CancellationTokenSource is disposed after successful background operations + var totalParts = 2; + var partSize = 8 * 1024 * 1024; + var totalObjectSize = totalParts * partSize; + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( + totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: MultipartDownloadType.PART); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + + + // Act + await coordinator.StartDownloadAsync(null, CancellationToken.None); + + // Wait 
+
+        #endregion
+
+        #region Background Task Resource Management Tests
+
+        [TestMethod]
+        public async Task StartDownloadsAsync_BackgroundTaskSuccess_DisposesCancellationTokenSource()
+        {
+            // Arrange - Test that CancellationTokenSource is disposed after successful background operations
+            var totalParts = 2;
+            var partSize = 8 * 1024 * 1024;
+            var totalObjectSize = totalParts * partSize;
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart(
+                totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true);
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.PART);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1);
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            // Act
+            await coordinator.StartDownloadAsync(null, CancellationToken.None);
+
+            // Wait for background task to complete
+            await coordinator.DownloadCompletionTask;
+
+            // Assert - Background task should complete successfully
+            Assert.IsTrue(coordinator.DownloadCompletionTask.IsCompleted &&
+                          !coordinator.DownloadCompletionTask.IsFaulted &&
+                          !coordinator.DownloadCompletionTask.IsCanceled,
+                "Background task should complete successfully");
+        }
+
+        [TestMethod]
+        public async Task StartDownloadsAsync_BackgroundTaskFailure_DisposesCancellationTokenSource()
+        {
+            // Arrange - Test that CancellationTokenSource is disposed even when background task fails
+            var totalParts = 2;
+            var partSize = 8 * 1024 * 1024;
+            var totalObjectSize = totalParts * partSize;
+
+            var mockDataHandler = new Mock();
+
+            // First call (Part 1) succeeds
+            var callCount = 0;
+            mockDataHandler
+                .Setup(x => x.ProcessPartAsync(It.IsAny<int>(), It.IsAny<GetObjectResponse>(), It.IsAny<CancellationToken>()))
+                .Returns<int, GetObjectResponse, CancellationToken>((partNum, response, ct) =>
+                {
+                    callCount++;
+                    if (partNum == 1)
+                    {
+                        return Task.CompletedTask; // Part 1 succeeds
+                    }
+                    throw new InvalidOperationException("Simulated download failure"); // Background parts fail
+                });
+
+            mockDataHandler
+                .Setup(x => x.WaitForCapacityAsync(It.IsAny<CancellationToken>()))
+                .Returns(Task.CompletedTask);
+
+            mockDataHandler
+                .Setup(x => x.OnDownloadComplete(It.IsAny()));
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart(
+                totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true);
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.PART);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1);
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object);
+
+            // Act
+            await coordinator.StartDownloadAsync(null, CancellationToken.None);
+
+            // Wait for background task to complete (with failure)
+            try
+            {
+                await coordinator.DownloadCompletionTask;
+            }
+            catch (InvalidOperationException)
+            {
+                // Expected failure
+            }
+
+            // Assert - Background task should have failed but cleanup should be done
+            Assert.IsTrue(coordinator.DownloadCompletionTask.IsCompleted,
+                "Background task should be completed (even with failure)");
+            Assert.IsTrue(coordinator.DownloadCompletionTask.IsFaulted,
+                "Background task should be faulted");
+        }
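The disposal behavior these two tests exercise can be pictured as a try/finally (or `using`) around the background work; this is only an illustrative sketch, not the manager's actual code:

    using System;
    using System.Threading;
    using System.Threading.Tasks;

    static class CtsCleanupSketch
    {
        // The CancellationTokenSource governing background parts is disposed when the
        // background work finishes, whether it succeeded, faulted, or was cancelled.
        public static Task RunBackgroundAsync(Func<CancellationToken, Task> downloadRemainingParts)
        {
            return Task.Run(async () =>
            {
                using (var cts = new CancellationTokenSource())
                {
                    await downloadRemainingParts(cts.Token);
                } // Dispose runs even if the await above throws
            });
        }
    }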
+
+        [TestMethod]
+        public async Task StartDownloadsAsync_EarlyError_DisposesCancellationTokenSource()
+        {
+            // Arrange - Test CancellationTokenSource disposal when error occurs before background task starts
+            var mockDataHandler = new Mock();
+
+            // WaitForCapacityAsync succeeds (needed for discovery)
+            mockDataHandler
+                .Setup(x => x.WaitForCapacityAsync(It.IsAny<CancellationToken>()))
+                .Returns(Task.CompletedTask);
+
+            // ProcessPartAsync succeeds for Part 1 (discovery)
+            mockDataHandler
+                .Setup(x => x.ProcessPartAsync(1, It.IsAny<GetObjectResponse>(), It.IsAny<CancellationToken>()))
+                .Returns(Task.CompletedTask);
+
+            // Simulate error during PrepareAsync (before background task is created)
+            mockDataHandler
+                .Setup(x => x.PrepareAsync(It.IsAny(), It.IsAny()))
+                .ThrowsAsync(new InvalidOperationException("Simulated prepare failure"));
+
+            var totalParts = 2;
+            var partSize = 8 * 1024 * 1024;
+            var totalObjectSize = totalParts * partSize;
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart(
+                totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true);
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.PART);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object);
+
+            // Act & Assert
+            try
+            {
+                await coordinator.StartDownloadAsync(null, CancellationToken.None);
+                Assert.Fail("Expected InvalidOperationException to be thrown");
+            }
+            catch (InvalidOperationException ex)
+            {
+                Assert.AreEqual("Simulated prepare failure", ex.Message);
+            }
+
+            // Assert - DownloadCompletionTask should return completed task when no background work exists
+            Assert.IsTrue(coordinator.DownloadCompletionTask.IsCompleted,
+                "DownloadCompletionTask should return completed task when no background work exists");
+        }
+
+        [TestMethod]
+        public async Task StartDownloadsAsync_BackgroundTaskCancellation_HandlesTokenDisposalProperly()
+        {
+            // Arrange - Test proper token disposal when background task is cancelled
+            var totalParts = 3;
+            var partSize = 8 * 1024 * 1024;
+            var totalObjectSize = totalParts * partSize;
+
+            var cts = new CancellationTokenSource();
+            var mockDataHandler = new Mock();
+
+            // Part 1 discovery succeeds (now also calls WaitForCapacityAsync)
+            var callCount = 0;
+            mockDataHandler
+                .Setup(x => x.WaitForCapacityAsync(It.IsAny<CancellationToken>()))
+                .Returns(() =>
+                {
+                    callCount++;
+                    if (callCount == 1)
+                    {
+                        // First call (Part 1 discovery) succeeds
+                        return Task.CompletedTask;
+                    }
+                    else
+                    {
+                        // Second call (background task) cancels
+                        cts.Cancel(); // Cancel during background task execution
+                        throw new OperationCanceledException();
+                    }
+                });
+
+            // Part 1 processing succeeds
+            mockDataHandler
+                .Setup(x => x.ProcessPartAsync(1, It.IsAny<GetObjectResponse>(), It.IsAny<CancellationToken>()))
+                .Returns(Task.CompletedTask);
+
+            mockDataHandler
+                .Setup(x => x.OnDownloadComplete(It.IsAny()));
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart(
+                totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true);
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.PART);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1);
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object);
+
+            // Act
+            await coordinator.StartDownloadAsync(null, CancellationToken.None);
+
+            // Wait for background task cancellation
+            try
+            {
+                await coordinator.DownloadCompletionTask;
+            }
+            catch (OperationCanceledException)
+            {
+                // Expected
+            }
+
+            // Assert - Cancellation should be handled properly with cleanup
+            Assert.IsTrue(coordinator.DownloadCompletionTask.IsCompleted,
+                "Background task should be completed");
+            Assert.IsTrue(coordinator.DownloadCompletionTask.IsFaulted || coordinator.DownloadCompletionTask.IsCanceled,
+                "Background task should be faulted or canceled");
+        }
+
+        #endregion
+
+        #region Disposal Tests
+
+        [TestMethod]
+        public void Dispose_MultipleCalls_IsIdempotent()
+        {
+            // Arrange
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client();
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            // Act
+            coordinator.Dispose();
+            coordinator.Dispose(); // Second call should not throw
+
+            // Assert - no exception thrown
+        }
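Idempotent disposal usually comes down to a guard flag; a generic sketch of the pattern the test asserts (not the coordinator's actual fields):

    using System;
    using System.Threading;

    public sealed class IdempotentDisposeSketch : IDisposable
    {
        private readonly SemaphoreSlim _semaphore = new SemaphoreSlim(1, 1);
        private bool _disposed;

        public void Dispose()
        {
            if (_disposed) return; // second and later calls are no-ops
            _disposed = true;
            _semaphore.Dispose();
        }
    }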
+
+        [TestMethod]
+        [ExpectedException(typeof(ObjectDisposedException))]
+        public async Task Operations_AfterDispose_ThrowObjectDisposedException()
+        {
+            // Arrange
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client();
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            // Act
+            coordinator.Dispose();
+            await coordinator.StartDownloadAsync(null, CancellationToken.None);
+        }
+
+        #endregion
+
+        #region Cancellation Token Tests
+
+        [TestMethod]
+        [ExpectedException(typeof(OperationCanceledException))]
+        public async Task DiscoverDownloadStrategyAsync_WhenCancelled_ThrowsOperationCanceledException()
+        {
+            // Arrange
+            var mockClient = new Mock<IAmazonS3>();
+            mockClient.Setup(x => x.GetObjectAsync(It.IsAny<GetObjectRequest>(), It.IsAny<CancellationToken>()))
+                .ThrowsAsync(new OperationCanceledException());
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            var cts = new CancellationTokenSource();
+            cts.Cancel();
+
+            // Act
+            await coordinator.StartDownloadAsync(null, cts.Token);
+        }
+
+        [TestMethod]
+        [ExpectedException(typeof(OperationCanceledException))]
+        public async Task StartDownloadAsync_SinglePart_WithPreCancelledToken_ThrowsOperationCanceledException()
+        {
+            var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse(1024);
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(
+                (req, ct) => Task.FromResult(mockResponse));
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(
+                mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            var cts = new CancellationTokenSource();
+            cts.Cancel();
+
+            await coordinator.StartDownloadAsync(null, cts.Token);
+        }
+
+        [TestMethod]
+        public async Task DiscoverDownloadStrategyAsync_PassesCancellationTokenToS3Client()
+        {
+            // Arrange
+            CancellationToken capturedToken = default;
+            var mockClient = new Mock<IAmazonS3>();
+            mockClient.Setup(x => x.GetObjectAsync(It.IsAny<GetObjectRequest>(), It.IsAny<CancellationToken>()))
+                .Callback<GetObjectRequest, CancellationToken>((req, ct) => capturedToken = ct)
+                .ReturnsAsync(MultipartDownloadTestHelpers.CreateSinglePartResponse(1024));
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            var cts = new CancellationTokenSource();
+
+            // Act
+            await coordinator.StartDownloadAsync(null, cts.Token);
+
+            // Assert
+            Assert.AreEqual(cts.Token, capturedToken);
+        }
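The pre-cancelled-token tests imply an entry guard like the following sketch (an assumed shape, not the SDK source): the token is checked before any S3 call is issued.

    using System;
    using System.Threading;
    using System.Threading.Tasks;

    static class EntryGuardSketch
    {
        static async Task StartDownloadAsync(CancellationToken ct)
        {
            ct.ThrowIfCancellationRequested(); // fail fast on a pre-cancelled token
            await Task.Delay(10, ct);          // stands in for the discovery GetObject call
        }

        static async Task Main()
        {
            using (var cts = new CancellationTokenSource())
            {
                cts.Cancel();
                try { await StartDownloadAsync(cts.Token); }
                catch (OperationCanceledException) { Console.WriteLine("cancelled before start"); }
            }
        }
    }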
+
+        [TestMethod]
+        [ExpectedException(typeof(OperationCanceledException))]
+        public async Task StartDownloadsAsync_WhenCancelledBeforeStart_ThrowsOperationCanceledException()
+        {
+            // Arrange
+            var totalParts = 3;
+            var partSize = 8 * 1024 * 1024;
+            var totalObjectSize = totalParts * partSize;
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart(
+                totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true);
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.PART);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            var cts = new CancellationTokenSource();
+            cts.Cancel();
+
+            // Act
+            await coordinator.StartDownloadAsync(null, cts.Token);
+            await coordinator.DownloadCompletionTask; // Wait for background task to observe exceptions
+        }
+
+        [TestMethod]
+        public async Task StartDownloadsAsync_WhenCancelledDuringDownloads_NotifiesBufferManager()
+        {
+            // Arrange
+            var totalParts = 3;
+            var partSize = 8 * 1024 * 1024;
+            var totalObjectSize = totalParts * partSize;
+
+            var callCount = 0;
+            var mockClient = new Mock<IAmazonS3>();
+            mockClient.Setup(x => x.GetObjectAsync(It.IsAny<GetObjectRequest>(), It.IsAny<CancellationToken>()))
+                .Returns(() =>
+                {
+                    callCount++;
+                    if (callCount == 1)
+                    {
+                        // First call (discovery) succeeds
+                        return Task.FromResult(MultipartDownloadTestHelpers.CreateMultipartFirstPartResponse(
+                            partSize, totalParts, totalObjectSize, "test-etag"));
+                    }
+                    else
+                    {
+                        // Subsequent calls (downloads) throw cancellation
+                        throw new OperationCanceledException();
+                    }
+                });
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.PART);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1);
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            // Act
+            try
+            {
+                await coordinator.StartDownloadAsync(null, CancellationToken.None);
+                await coordinator.DownloadCompletionTask; // Wait for background task to observe exceptions
+            }
+            catch (OperationCanceledException)
+            {
+                // Expected
+            }
+
+            // Assert - Verify DownloadCompletionTask is faulted with the cancellation exception
+            Assert.IsTrue(coordinator.DownloadCompletionTask.IsCompleted, "DownloadCompletionTask should be completed");
+            Assert.IsTrue(coordinator.DownloadCompletionTask.IsFaulted || coordinator.DownloadCompletionTask.IsCanceled,
+                "DownloadCompletionTask should be faulted or canceled");
+        }
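From the caller's side, the pattern these cancellation tests rely on is: always await DownloadCompletionTask after StartDownloadAsync so background faults are observed rather than leaked. A minimal self-contained sketch of that observation step:

    using System;
    using System.Threading.Tasks;

    static class CompletionTaskSketch
    {
        static async Task Main()
        {
            // Simulates a background part download that is cancelled mid-flight.
            Func<Task> background = async () =>
            {
                await Task.Yield();
                throw new OperationCanceledException();
            };
            var downloadCompletionTask = Task.Run(background);

            try
            {
                await downloadCompletionTask; // the fault is observed here, not leaked
            }
            catch (OperationCanceledException)
            {
                Console.WriteLine("background download cancelled");
            }
        }
    }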
+
+        [TestMethod]
+        public async Task StartDownloadsAsync_WhenCancelled_CompletionTaskIsFaulted()
+        {
+            // Arrange
+            var totalParts = 3;
+            var partSize = 8 * 1024 * 1024;
+            var totalObjectSize = totalParts * partSize;
+
+            var callCount = 0;
+            var mockClient = new Mock<IAmazonS3>();
+            mockClient.Setup(x => x.GetObjectAsync(It.IsAny<GetObjectRequest>(), It.IsAny<CancellationToken>()))
+                .Returns(() =>
+                {
+                    callCount++;
+                    if (callCount == 1)
+                    {
+                        return Task.FromResult(MultipartDownloadTestHelpers.CreateMultipartFirstPartResponse(
+                            partSize, totalParts, totalObjectSize, "test-etag"));
+                    }
+                    throw new OperationCanceledException();
+                });
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.PART);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1);
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            // Act
+            try
+            {
+                await coordinator.StartDownloadAsync(null, CancellationToken.None);
+                await coordinator.DownloadCompletionTask; // Wait for background task to observe exceptions
+            }
+            catch (OperationCanceledException)
+            {
+                // Expected
+            }
+
+            // Assert - Verify DownloadCompletionTask is faulted with the cancellation
+            Assert.IsTrue(coordinator.DownloadCompletionTask.IsCompleted, "DownloadCompletionTask should be completed");
+            Assert.IsTrue(coordinator.DownloadCompletionTask.IsFaulted || coordinator.DownloadCompletionTask.IsCanceled,
+                "DownloadCompletionTask should be faulted or canceled");
+        }
+
+        [TestMethod]
+        public async Task StartDownloadsAsync_PassesCancellationTokenToBufferManager()
+        {
+            // Arrange
+            var totalParts = 2;
+            var partSize = 8 * 1024 * 1024;
+            var totalObjectSize = totalParts * partSize;
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart(
+                totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true);
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.PART);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            var cts = new CancellationTokenSource();
+
+            // Act
+            var result = await coordinator.StartDownloadAsync(null, cts.Token);
+
+            // Assert - The cancellation token was passed through to the data handler
+            Assert.IsNotNull(result);
+        }
+
+        [TestMethod]
+        public async Task StartDownloadsAsync_CancellationPropagatesAcrossConcurrentDownloads()
+        {
+            // Arrange - Multiple concurrent downloads, one fails with cancellation
+            var totalParts = 5;
+            var partSize = 8 * 1024 * 1024;
+            var totalObjectSize = totalParts * partSize;
+
+            var callCount = 0;
+            var mockClient = new Mock<IAmazonS3>();
+            mockClient.Setup(x => x.GetObjectAsync(It.IsAny<GetObjectRequest>(), It.IsAny<CancellationToken>()))
+                .Returns(() =>
+                {
+                    callCount++;
+                    if (callCount == 1)
+                    {
+                        // Discovery call succeeds
+                        return Task.FromResult(MultipartDownloadTestHelpers.CreateMultipartFirstPartResponse(
+                            partSize, totalParts, totalObjectSize, "test-etag"));
+                    }
+                    else if (callCount == 2)
+                    {
+                        // Second download (part 2) throws cancellation
+                        throw new OperationCanceledException();
+                    }
+                    else
+                    {
+                        // Other downloads should also be cancelled
+                        return Task.FromResult(MultipartDownloadTestHelpers.CreateMockGetObjectResponse(
+                            partSize, totalParts,
+                            $"bytes {(callCount - 1) * partSize}-{callCount * partSize - 1}/{totalObjectSize}",
+                            "test-etag"));
+                    }
+                });
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.PART);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 2);
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            // Act
+            try
+            {
+                await coordinator.StartDownloadAsync(null, CancellationToken.None);
+                await coordinator.DownloadCompletionTask; // Wait for background task to observe exceptions
+            }
+            catch (OperationCanceledException)
+            {
+                // Expected
+            }
+
+            // Assert - DownloadCompletionTask should be faulted
+            Assert.IsTrue(coordinator.DownloadCompletionTask.IsFaulted || coordinator.DownloadCompletionTask.IsCanceled,
+                "DownloadCompletionTask should be faulted or canceled when errors occur");
+        }
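One common way to get the behavior the propagation test asserts is a linked CancellationTokenSource shared by all part downloads; a sketch of that shape (not necessarily the manager's exact mechanism):

    using System;
    using System.Collections.Generic;
    using System.Threading;
    using System.Threading.Tasks;

    static class CancellationFanOutSketch
    {
        // All part downloads share a linked token; the first failure cancels the rest.
        public static async Task DownloadAllAsync(
            Func<int, CancellationToken, Task> downloadPartAsync,
            int totalParts,
            CancellationToken outer)
        {
            using (var linked = CancellationTokenSource.CreateLinkedTokenSource(outer))
            {
                var tasks = new List<Task>();
                for (var p = 2; p <= totalParts; p++)
                {
                    var part = p;
                    tasks.Add(Task.Run(async () =>
                    {
                        try { await downloadPartAsync(part, linked.Token); }
                        catch { linked.Cancel(); throw; } // propagate to sibling downloads
                    }));
                }
                await Task.WhenAll(tasks);
            }
        }
    }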
+
+        [TestMethod]
+        public async Task Coordinator_CanBeDisposedAfterCancellation()
+        {
+            // Arrange
+            var mockClient = new Mock<IAmazonS3>();
+            mockClient.Setup(x => x.GetObjectAsync(It.IsAny<GetObjectRequest>(), It.IsAny<CancellationToken>()))
+                .ThrowsAsync(new OperationCanceledException());
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            var cts = new CancellationTokenSource();
+            cts.Cancel();
+
+            // Act
+            try
+            {
+                await coordinator.StartDownloadAsync(null, cts.Token);
+            }
+            catch (OperationCanceledException)
+            {
+                // Expected
+            }
+
+            // Dispose should not throw
+            coordinator.Dispose();
+
+            // Assert - Multiple disposes should also work
+            coordinator.Dispose();
+        }
+
+        [TestMethod]
+        [ExpectedException(typeof(OperationCanceledException))]
+        public async Task StartDownloadsAsync_RangeStrategy_CancellationDuringDownloads()
+        {
+            // Arrange - RANGE strategy cancellation
+            var totalObjectSize = 20 * 1024 * 1024;
+            var partSize = 8 * 1024 * 1024;
+
+            var callCount = 0;
+            var mockClient = new Mock<IAmazonS3>();
+            mockClient.Setup(x => x.GetObjectAsync(It.IsAny<GetObjectRequest>(), It.IsAny<CancellationToken>()))
+                .Returns(() =>
+                {
+                    callCount++;
+                    if (callCount == 1)
+                    {
+                        // Discovery succeeds
+                        return Task.FromResult(MultipartDownloadTestHelpers.CreateRangeResponse(
+                            0, partSize - 1, totalObjectSize, "test-etag"));
+                    }
+                    // Part 2 download throws cancellation
+                    throw new OperationCanceledException();
+                });
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                partSize: partSize,
+                downloadType: MultipartDownloadType.RANGE);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1);
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            // Act
+            await coordinator.StartDownloadAsync(null, CancellationToken.None);
+            await coordinator.DownloadCompletionTask; // Wait for background task to observe exceptions
+        }
+
+        #endregion
+
+        #region Deadlock Prevention Tests
+
+        [TestMethod]
+        public async Task StartDownloadsAsync_ReturnsImmediately_PreventsDeadlock()
+        {
+            // Arrange - Create a scenario where buffer would fill during downloads
+            var totalParts = 5;
+            var partSize = 8 * 1024 * 1024;
+            var totalObjectSize = totalParts * partSize;
+
+            // Track download state
+            var downloadsStarted = new System.Collections.Concurrent.ConcurrentBag<int>();
+            var bufferBlockingStarted = new TaskCompletionSource<bool>();
+
+            var mockDataHandler = new Mock();
+
+            // Simulate WaitForCapacityAsync being called (downloads are actively buffering)
+            mockDataHandler
+                .Setup(x => x.WaitForCapacityAsync(It.IsAny<CancellationToken>()))
+                .Returns(() =>
+                {
+                    bufferBlockingStarted.TrySetResult(true);
+                    // Return immediately to allow downloads to proceed
+                    return Task.CompletedTask;
+                });
+
+            mockDataHandler
+                .Setup(x => x.ProcessPartAsync(It.IsAny<int>(), It.IsAny<GetObjectResponse>(), It.IsAny<CancellationToken>()))
+                .Callback<int, GetObjectResponse, CancellationToken>((partNum, _, __) =>
+                {
+                    downloadsStarted.Add(partNum);
+                })
+                .Returns(Task.CompletedTask);
+
+            mockDataHandler
+                .Setup(x => x.OnDownloadComplete(It.IsAny()));
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart(
+                totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true);
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.PART);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 2);
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object);
+
+            // Act - StartDownloadsAsync should return immediately (not wait for all downloads)
+            var stopwatch = System.Diagnostics.Stopwatch.StartNew();
+            await coordinator.StartDownloadAsync(null, CancellationToken.None);
+            stopwatch.Stop();
+
+            // Assert - StartDownloadsAsync should return almost immediately
+            // The key is it returns BEFORE all downloads complete, allowing the consumer to start reading
+            Assert.IsTrue(stopwatch.ElapsedMilliseconds < 1000,
+                $"StartDownloadsAsync should return immediately, took {stopwatch.ElapsedMilliseconds}ms");
+
+            // Verify Part 1 was processed (synchronously during StartDownloadsAsync)
+            Assert.IsTrue(downloadsStarted.Contains(1), "Part 1 should be processed synchronously");
+
+            // Wait for background downloads to start
+            var bufferCalledTask = Task.WhenAny(bufferBlockingStarted.Task, Task.Delay(2000));
+            await bufferCalledTask;
+            Assert.IsTrue(bufferBlockingStarted.Task.IsCompleted,
+                "Background downloads should have started after StartDownloadsAsync returned");
+
+            // Verify DownloadCompletionTask exists and is for background work
+            Assert.IsNotNull(coordinator.DownloadCompletionTask,
+                "DownloadCompletionTask should be set for multipart downloads");
+
+            // Wait for all background downloads to complete
+            await coordinator.DownloadCompletionTask;
+
+            // Verify all parts were eventually processed
+            Assert.AreEqual(totalParts, downloadsStarted.Count,
+                "All parts should be processed in background");
+        }
+
+        [TestMethod]
+        public async Task StartDownloadsAsync_SinglePart_ReturnsImmediatelyWithoutBackgroundTask()
+        {
+            // Arrange - Single-part downloads should not create background tasks
+            var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse(1024);
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(
+                (req, ct) => Task.FromResult(mockResponse));
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+
+            var mockDataHandler = CreateMockDataHandler();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object);
+
+            // Act
+            var stopwatch = System.Diagnostics.Stopwatch.StartNew();
+            await coordinator.StartDownloadAsync(null, CancellationToken.None);
+            stopwatch.Stop();
+
+            // DownloadCompletionTask should be completed immediately (no background work)
+            Assert.IsTrue(coordinator.DownloadCompletionTask.IsCompleted,
+                "DownloadCompletionTask should be completed for single-part downloads");
+
+            // Verify OnDownloadComplete was called
+            mockDataHandler.Verify(x => x.OnDownloadComplete(null), Times.Once);
+        }
+
+        #endregion
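The deadlock-prevention shape these tests pin down can be sketched as follows (hypothetical member names; an illustration, not the manager's code): part 1 is processed inline, and the remaining parts run on a background task exposed as a completion-task property, so the consumer can start reading while the buffer fills and drains concurrently.

    using System;
    using System.Threading.Tasks;

    public sealed class NoDeadlockSketch
    {
        // Completed by default, so single-part downloads need no background work.
        public Task DownloadCompletionTask { get; private set; } = Task.CompletedTask;

        public async Task StartAsync(Func<Task> processPart1Async, Func<Task> downloadRemainingPartsAsync)
        {
            await processPart1Async();                                      // inline
            DownloadCompletionTask = Task.Run(downloadRemainingPartsAsync); // background
            // returns here, before parts 2..N complete
        }
    }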
+
+        #region Capacity Checking Tests
+
+        [TestMethod]
+        public async Task Discovery_PartStrategy_CallsWaitForCapacityAsync()
+        {
+            // Arrange - PART strategy should check capacity during discovery
+            var capacityCallCount = 0;
+            var mockDataHandler = new Mock();
+
+            mockDataHandler
+                .Setup(x => x.WaitForCapacityAsync(It.IsAny<CancellationToken>()))
+                .Returns(() =>
+                {
+                    Interlocked.Increment(ref capacityCallCount);
+                    return Task.CompletedTask;
+                });
+
+            mockDataHandler
+                .Setup(x => x.ProcessPartAsync(It.IsAny<int>(), It.IsAny<GetObjectResponse>(), It.IsAny<CancellationToken>()))
+                .Returns(Task.CompletedTask);
+
+            var totalObjectSize = 24 * 1024 * 1024; // 24MB -> 3 parts @ 8MB
+            var partSize = 8 * 1024 * 1024; // 8MB
+            var totalParts = 3;
+
+            var mockResponse = MultipartDownloadTestHelpers.CreateMultipartFirstPartResponse(
+                partSize, totalParts, totalObjectSize, "test-etag");
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(
+                (req, ct) => Task.FromResult(mockResponse));
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.PART);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(
+                mockClient.Object, request, config, mockDataHandler.Object);
+
+            // Act
+            var result = await coordinator.StartDownloadAsync(null, CancellationToken.None);
+
+            // Assert
+            Assert.IsNotNull(result);
+            Assert.AreEqual(3, result.TotalParts);
+            Assert.AreEqual(1, capacityCallCount,
+                "PART strategy should call WaitForCapacityAsync during Part 1 discovery");
+        }
+
+        [TestMethod]
+        public async Task Discovery_RangeStrategy_CallsWaitForCapacityAsync()
+        {
+            // Arrange - RANGE strategy should also check capacity during discovery
+            var capacityCallCount = 0;
+            var mockDataHandler = new Mock();
+
+            mockDataHandler
+                .Setup(x => x.WaitForCapacityAsync(It.IsAny<CancellationToken>()))
+                .Returns(() =>
+                {
+                    Interlocked.Increment(ref capacityCallCount);
+                    return Task.CompletedTask;
+                });
+
+            mockDataHandler
+                .Setup(x => x.ProcessPartAsync(It.IsAny<int>(), It.IsAny<GetObjectResponse>(), It.IsAny<CancellationToken>()))
+                .Returns(Task.CompletedTask);
+
+            var totalObjectSize = 17 * 1024 * 1024; // 17MB -> 3 parts @ 8MB
+            var partSize = 8 * 1024 * 1024; // 8MB
+
+            var mockResponse = MultipartDownloadTestHelpers.CreateRangeResponse(
+                0, partSize - 1, totalObjectSize, "test-etag");
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(
+                (req, ct) => Task.FromResult(mockResponse));
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                partSize: partSize,
+                downloadType: MultipartDownloadType.RANGE);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(
+                mockClient.Object, request, config, mockDataHandler.Object);
+
+            // Act
+            var result = await coordinator.StartDownloadAsync(null, CancellationToken.None);
+
+            // Assert
+            Assert.IsNotNull(result);
+            Assert.AreEqual(3, result.TotalParts); // 17MB / 8MB = 3 parts (ceiling)
+            Assert.AreEqual(1, capacityCallCount,
+                "RANGE strategy should call WaitForCapacityAsync during Part 1 discovery");
+        }
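The "3 parts (ceiling)" assertion is plain ceiling division; a one-liner showing the arithmetic assumed by the RANGE test:

    static class PartMathSketch
    {
        // 17 MB at 8 MB parts -> ceil(17 / 8) = 3, matching the assertion above.
        public static int TotalParts(long objectSize, long partSize)
            => (int)((objectSize + partSize - 1) / partSize);
    }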
+
+        [TestMethod]
+        public async Task MultipleDownloads_WithSharedHttpThrottler_RespectsLimits()
+        {
+            // Arrange - Simulate directory download scenario with shared throttler
+            var sharedThrottler = new SemaphoreSlim(1, 1); // Very limited: 1 concurrent request
+            var mockDataHandler1 = CreateMockDataHandler();
+            var mockDataHandler2 = CreateMockDataHandler();
+
+            // Create two download managers sharing the same HTTP throttler
+            var mockResponse1 = MultipartDownloadTestHelpers.CreateSinglePartResponse(1024, "file1-etag");
+            var mockResponse2 = MultipartDownloadTestHelpers.CreateSinglePartResponse(2048, "file2-etag");
+
+            var mockClient1 = MultipartDownloadTestHelpers.CreateMockS3Client(
+                (req, ct) => Task.FromResult(mockResponse1));
+            var mockClient2 = MultipartDownloadTestHelpers.CreateMockS3Client(
+                (req, ct) => Task.FromResult(mockResponse2));
+
+            var request1 = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var request2 = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+
+            var coordinator1 = new MultipartDownloadManager(mockClient1.Object, request1, config, mockDataHandler1.Object, null, sharedThrottler);
+            var coordinator2 = new MultipartDownloadManager(mockClient2.Object, request2, config, mockDataHandler2.Object, null, sharedThrottler);
+
+            var download1 = await coordinator1.StartDownloadAsync(null, CancellationToken.None);
+            var download2 = await coordinator2.StartDownloadAsync(null, CancellationToken.None);
+
+            // Wait for all background work to complete
+            await Task.WhenAll(
+                coordinator1.DownloadCompletionTask,
+                coordinator2.DownloadCompletionTask
+            );
+
+            // Assert - Both should complete successfully and semaphore should be fully released
+            Assert.IsNotNull(download1);
+            Assert.IsNotNull(download2);
+            Assert.AreEqual(1, sharedThrottler.CurrentCount, "HTTP throttler should be fully released after complete download lifecycle");
+
+            // Cleanup
+            coordinator1.Dispose();
+            coordinator2.Dispose();
+            sharedThrottler.Dispose();
+        }
+
+        [TestMethod]
+        public async Task Discovery_HttpRequestFails_ReleasesCapacityProperly()
+        {
+            // Arrange - Simulate HTTP request failure
+            var httpThrottler = new SemaphoreSlim(2, 2);
+            var initialCount = httpThrottler.CurrentCount;
+
+            var mockDataHandler = CreateMockDataHandler();
+            var mockClient = new Mock<IAmazonS3>();
+
+            // HTTP request throws exception
+            mockClient
+                .Setup(x => x.GetObjectAsync(It.IsAny<GetObjectRequest>(), It.IsAny<CancellationToken>()))
+                .ThrowsAsync(new InvalidOperationException("Simulated S3 failure"));
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object, null, httpThrottler);
+
+            // Act & Assert
+            try
+            {
+                await coordinator.StartDownloadAsync(null, CancellationToken.None);
+                Assert.Fail("Expected InvalidOperationException to be thrown");
+            }
+            catch (InvalidOperationException ex)
+            {
+                Assert.AreEqual("Simulated S3 failure", ex.Message);
+            }
+
+            // Assert - HTTP concurrency should be properly released even after failure
+            Assert.AreEqual(initialCount, httpThrottler.CurrentCount,
+                "HTTP concurrency slot should be released even when HTTP request fails");
+
+            // Cleanup
+            httpThrottler.Dispose();
+        }
+
+        [TestMethod]
+        public async Task Discovery_CancellationDuringCapacityWait_ReleasesHttpSlotProperly()
+        {
+            // Arrange - Test cancellation during capacity acquisition
+            var httpThrottler = new SemaphoreSlim(2, 2);
+            var initialCount = httpThrottler.CurrentCount;
+
+            var cts = new CancellationTokenSource();
+            var mockDataHandler = new Mock();
+
+            // Cancel during capacity wait
+            mockDataHandler
+                .Setup(x => x.WaitForCapacityAsync(It.IsAny<CancellationToken>()))
+                .Returns(() =>
+                {
+                    cts.Cancel();
+                    throw new OperationCanceledException();
+                });
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client();
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object, null, httpThrottler);
+
+            // Act & Assert
+            try
+            {
+                await coordinator.StartDownloadAsync(null, cts.Token);
+                Assert.Fail("Expected OperationCanceledException to be thrown");
+            }
+            catch (OperationCanceledException)
+            {
+                // Expected
+            }
+
+            // Assert - HTTP slot should still be available (never acquired due to early cancellation)
+            Assert.AreEqual(initialCount, httpThrottler.CurrentCount,
+                "HTTP concurrency slot should remain available when cancelled before HTTP request");
+
+            // Cleanup
+            httpThrottler.Dispose();
+        }
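The shared-throttler and failure-path tests above both follow from one pattern: each GetObject is wrapped in a wait/release pair on a SemaphoreSlim that several managers may share, with the release in a finally block. A hedged sketch of that wrapper (assumed shape, not the SDK's internals):

    using System;
    using System.Threading;
    using System.Threading.Tasks;

    static class SharedThrottleSketch
    {
        // One SemaphoreSlim shared by several managers caps their combined in-flight
        // GetObject calls; releasing in finally keeps CurrentCount balanced even
        // when the request throws or is cancelled.
        public static async Task<T> WithThrottleAsync<T>(
            SemaphoreSlim shared,
            Func<CancellationToken, Task<T>> getObjectAsync,
            CancellationToken ct)
        {
            await shared.WaitAsync(ct);
            try
            {
                return await getObjectAsync(ct);
            }
            finally
            {
                shared.Release(); // restores CurrentCount on every path
            }
        }
    }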
+
+        [TestMethod]
+        public async Task Discovery_CancellationAfterCapacityButBeforeHttp_ReleasesHttpSlotProperly()
+        {
+            // Arrange - Test cancellation after capacity but before HTTP call
+            var httpThrottler = new SemaphoreSlim(2, 2);
+            var initialCount = httpThrottler.CurrentCount;
+
+            var cts = new CancellationTokenSource();
+            var mockDataHandler = new Mock();
+
+            // Capacity acquisition succeeds
+            mockDataHandler
+                .Setup(x => x.WaitForCapacityAsync(It.IsAny<CancellationToken>()))
+                .Returns(Task.CompletedTask);
+
+            // HTTP call gets cancelled
+            var mockClient = new Mock<IAmazonS3>();
+            mockClient
+                .Setup(x => x.GetObjectAsync(It.IsAny<GetObjectRequest>(), It.IsAny<CancellationToken>()))
+                .Returns(() =>
+                {
+                    cts.Token.ThrowIfCancellationRequested();
+                    throw new OperationCanceledException();
+                });
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object, null, httpThrottler);
+
+            // Act & Assert
+            try
+            {
+                cts.Cancel(); // Cancel before discovery
+                await coordinator.StartDownloadAsync(null, cts.Token);
+                Assert.Fail("Expected OperationCanceledException to be thrown");
+            }
+            catch (OperationCanceledException)
+            {
+                // Expected
+            }
+
+            // Assert - HTTP slot should be properly released by finally block
+            Assert.AreEqual(initialCount, httpThrottler.CurrentCount,
+                "HTTP concurrency slot should be released by finally block on cancellation");
+
+            // Cleanup
+            httpThrottler.Dispose();
+        }
+
+        [TestMethod]
+        public async Task Discovery_SinglePart_StillCallsCapacityCheck()
+        {
+            // Arrange - Even single-part downloads should check capacity during discovery
+            var capacityCallCount = 0;
+            var mockDataHandler = new Mock();
+
+            mockDataHandler
+                .Setup(x => x.WaitForCapacityAsync(It.IsAny<CancellationToken>()))
+                .Returns(() =>
+                {
+                    Interlocked.Increment(ref capacityCallCount);
+                    return Task.CompletedTask;
+                });
+
+            mockDataHandler
+                .Setup(x => x.ProcessPartAsync(It.IsAny<int>(), It.IsAny<GetObjectResponse>(), It.IsAny<CancellationToken>()))
+                .Returns(Task.CompletedTask);
+
+            var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse(1024);
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(
+                (req, ct) => Task.FromResult(mockResponse));
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object);
+
+            // Act
+            var result = await coordinator.StartDownloadAsync(null, CancellationToken.None);
+
+            // Assert
+            Assert.IsNotNull(result);
+            Assert.AreEqual(1, result.TotalParts);
+            Assert.AreEqual(1, capacityCallCount,
+                "Even single-part downloads should call WaitForCapacityAsync during discovery");
+        }
+
+        #endregion
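Every variant in this region observes exactly one WaitForCapacityAsync call during discovery; a hedged sketch of that ordering (hypothetical helper names):

    using System;
    using System.Threading;
    using System.Threading.Tasks;
    using Amazon.S3;
    using Amazon.S3.Model;

    static class DiscoverySketch
    {
        // Buffer capacity is reserved once, before the first GetObject is issued;
        // the response then drives the strategy (PartsCount / Content-Range).
        public static async Task<GetObjectResponse> DiscoverAsync(
            IAmazonS3 s3,
            GetObjectRequest discoveryRequest,
            Func<CancellationToken, Task> waitForCapacityAsync,
            CancellationToken ct)
        {
            await waitForCapacityAsync(ct);                       // exactly one capacity wait
            return await s3.GetObjectAsync(discoveryRequest, ct); // then the discovery request
        }
    }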
+
+        #region Concurrency Control Tests
+
+        [TestMethod]
+        public async Task HttpSemaphore_HeldThroughProcessPartAsync()
+        {
+            // Arrange - Test that HTTP semaphore is NOT released until ProcessPartAsync completes
+            var totalParts = 2;
+            var partSize = 8 * 1024 * 1024;
+            var totalObjectSize = totalParts * partSize;
+
+            // Use our own semaphore to monitor its state
+            var concurrentRequests = 1;
+            var httpSemaphore = new SemaphoreSlim(concurrentRequests, concurrentRequests);
+
+            var part1EnteredProcessPart = new TaskCompletionSource<bool>();
+            var part1CanExitProcessPart = new TaskCompletionSource<bool>();
+            var semaphoreWasReleasedDuringPart1 = false;
+
+            var mockDataHandler = new Mock();
+
+            mockDataHandler
+                .Setup(x => x.WaitForCapacityAsync(It.IsAny<CancellationToken>()))
+                .Returns(Task.CompletedTask);
+
+            mockDataHandler
+                .Setup(x => x.ProcessPartAsync(It.IsAny<int>(), It.IsAny<GetObjectResponse>(), It.IsAny<CancellationToken>()))
+                .Returns<int, GetObjectResponse, CancellationToken>(async (partNum, response, ct) =>
+                {
+                    if (partNum == 1)
+                    {
+                        // Part 1 enters ProcessPartAsync
+                        part1EnteredProcessPart.SetResult(true);
+
+                        // Check if semaphore has been released (it shouldn't be with the fix!)
+                        if (httpSemaphore.CurrentCount > 0)
+                        {
+                            semaphoreWasReleasedDuringPart1 = true;
+                        }
+
+                        // Block Part 1 here so we can observe semaphore state
+                        await part1CanExitProcessPart.Task;
+                    }
+                });
+
+            mockDataHandler
+                .Setup(x => x.OnDownloadComplete(It.IsAny()));
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart(
+                totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true);
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.PART);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(
+                concurrentRequests: concurrentRequests);
+
+            // Pass in our instrumented semaphore
+            var coordinator = new MultipartDownloadManager(
+                mockClient.Object, request, config, mockDataHandler.Object, null, httpSemaphore);
+
+            // Act
+            var startTask = coordinator.StartDownloadAsync(null, CancellationToken.None);
+
+            // Wait for Part 1 to enter ProcessPartAsync
+            await part1EnteredProcessPart.Task;
+
+            // Check semaphore state while Part 1 is in ProcessPartAsync
+            var semaphoreAvailableDuringProcessing = httpSemaphore.CurrentCount > 0;
+
+            // Release Part 1 to continue
+            part1CanExitProcessPart.SetResult(true);
+
+            await startTask;
+            await coordinator.DownloadCompletionTask;
+
+            // Assert - This is the deterministic test of the fix
+            Assert.IsFalse(semaphoreAvailableDuringProcessing,
+                "HTTP semaphore should NOT be released while ProcessPartAsync is executing. " +
+                "Before the fix, semaphore.CurrentCount would be > 0 (released early); " +
+                "after the fix, it should be 0 (held through ProcessPartAsync).");
+
+            Assert.IsFalse(semaphoreWasReleasedDuringPart1,
+                "Semaphore should not have been released at any point during Part 1 ProcessPartAsync execution");
+
+            // Cleanup
+            httpSemaphore.Dispose();
+        }
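The fix this test verifies is essentially where the Release sits relative to part processing; a hedged sketch of the fixed shape (illustrative types, not the manager's code):

    using System;
    using System.Threading;
    using System.Threading.Tasks;

    static class HoldSlotThroughProcessingSketch
    {
        // The HTTP slot is released only after ProcessPartAsync finishes, so
        // CurrentCount stays 0 while a part is being buffered - which is exactly
        // what the instrumented semaphore above observes.
        public static async Task DownloadPartAsync(
            SemaphoreSlim httpSemaphore,
            Func<CancellationToken, Task<IDisposable>> getObjectAsync,
            Func<IDisposable, CancellationToken, Task> processPartAsync,
            CancellationToken ct)
        {
            await httpSemaphore.WaitAsync(ct);
            try
            {
                using (var response = await getObjectAsync(ct))
                {
                    await processPartAsync(response, ct); // slot still held here
                }
            }
            finally
            {
                httpSemaphore.Release(); // only now is the slot returned
            }
        }
    }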
+
+        [TestMethod]
+        public async Task HttpSemaphore_RangeStrategy_HeldThroughProcessPartAsync()
+        {
+            // Arrange - Test that RANGE strategy also holds semaphore through ProcessPartAsync
+            var totalObjectSize = 17 * 1024 * 1024; // 17MB -> 3 parts @ 8MB
+            var partSize = 8 * 1024 * 1024;
+
+            var concurrentRequests = 1;
+            var httpSemaphore = new SemaphoreSlim(concurrentRequests, concurrentRequests);
+
+            var part1EnteredProcessPart = new TaskCompletionSource<bool>();
+            var part1CanExitProcessPart = new TaskCompletionSource<bool>();
+
+            var mockDataHandler = new Mock();
+
+            mockDataHandler
+                .Setup(x => x.WaitForCapacityAsync(It.IsAny<CancellationToken>()))
+                .Returns(Task.CompletedTask);
+
+            mockDataHandler
+                .Setup(x => x.ProcessPartAsync(It.IsAny<int>(), It.IsAny<GetObjectResponse>(), It.IsAny<CancellationToken>()))
+                .Returns<int, GetObjectResponse, CancellationToken>(async (partNum, response, ct) =>
+                {
+                    if (partNum == 1)
+                    {
+                        part1EnteredProcessPart.SetResult(true);
+                        await part1CanExitProcessPart.Task;
+                    }
+                });
+
+            mockDataHandler
+                .Setup(x => x.OnDownloadComplete(It.IsAny()));
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart(
+                3, partSize, totalObjectSize, "test-etag", usePartStrategy: false); // RANGE strategy
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                partSize: partSize,
+                downloadType: MultipartDownloadType.RANGE);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(
+                concurrentRequests: concurrentRequests);
+
+            var coordinator = new MultipartDownloadManager(
+                mockClient.Object, request, config, mockDataHandler.Object, null, httpSemaphore);
+
+            // Act
+            var startTask = coordinator.StartDownloadAsync(null, CancellationToken.None);
+            await part1EnteredProcessPart.Task;
+
+            // Check semaphore state while Part 1 is in ProcessPartAsync
+            var semaphoreAvailableDuringProcessing = httpSemaphore.CurrentCount > 0;
+
+            part1CanExitProcessPart.SetResult(true);
+            await startTask;
+            await coordinator.DownloadCompletionTask;
+
+            // Assert
+            Assert.IsFalse(semaphoreAvailableDuringProcessing,
+                "RANGE strategy should also hold HTTP semaphore through ProcessPartAsync");
+
+            // Cleanup
+            httpSemaphore.Dispose();
+        }
+
+        #endregion
+
+        #region Semaphore Release Error Path Tests
+
+        [TestMethod]
+        public async Task Discovery_WaitForCapacityFails_DoesNotReleaseHttpSemaphore()
+        {
+            // Arrange - Test that semaphore is NOT released when it was never acquired
+            var httpThrottler = new SemaphoreSlim(2, 2);
+            var initialCount = httpThrottler.CurrentCount;
+
+            var mockDataHandler = new Mock();
+
+            // WaitForCapacityAsync fails BEFORE HTTP semaphore is acquired
+            mockDataHandler
+                .Setup(x => x.WaitForCapacityAsync(It.IsAny<CancellationToken>()))
+                .ThrowsAsync(new InvalidOperationException("Simulated capacity wait failure"));
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client();
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(
+                mockClient.Object, request, config, mockDataHandler.Object, null, httpThrottler);
+
+            // Act & Assert
+            try
+            {
+                await coordinator.StartDownloadAsync(null, CancellationToken.None);
+                Assert.Fail("Expected InvalidOperationException to be thrown");
+            }
+            catch (InvalidOperationException ex)
+            {
+                Assert.AreEqual("Simulated capacity wait failure", ex.Message);
+            }
+
+            // Assert - Semaphore should NOT be released (it was never acquired)
+            Assert.AreEqual(initialCount, httpThrottler.CurrentCount,
+                "HTTP semaphore should NOT be released when it was never acquired (failed before WaitAsync)");
+
+            // Cleanup
+            httpThrottler.Dispose();
+        }
+
+        [TestMethod]
+        public async Task StartDownloadAsync_WaitForCapacityFails_DoesNotReleaseHttpSemaphore()
+        {
+            // Arrange - Test that semaphore is NOT released when it was never acquired
+            var httpThrottler = new SemaphoreSlim(2, 2);
+            var initialCount = httpThrottler.CurrentCount;
+
+            var mockDataHandler = new Mock();
+
+            // WaitForCapacityAsync fails BEFORE HTTP semaphore is acquired
+            mockDataHandler
+                .Setup(x => x.WaitForCapacityAsync(It.IsAny<CancellationToken>()))
+                .ThrowsAsync(new InvalidOperationException("Simulated capacity wait failure"));
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client();
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(
+                mockClient.Object, request, config, mockDataHandler.Object, null, httpThrottler);
+
+            // Act & Assert
+            try
+            {
+                await coordinator.StartDownloadAsync(null, CancellationToken.None);
+                Assert.Fail("Expected InvalidOperationException to be thrown");
+            }
+            catch (InvalidOperationException ex)
+            {
+                Assert.AreEqual("Simulated capacity wait failure", ex.Message);
+            }
+
+            // Assert - Semaphore should NOT be released (it was never acquired)
+            Assert.AreEqual(initialCount, httpThrottler.CurrentCount,
+                "HTTP semaphore should NOT be released when it was never acquired (failed before WaitAsync)");
+
+            // Cleanup
+            httpThrottler.Dispose();
+        }
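The "never acquired, never released" cases above come down to acquisition order; a short sketch of the assumed sequence:

    using System;
    using System.Threading;
    using System.Threading.Tasks;

    static class AcquireOrderSketch
    {
        // Capacity is awaited before the HTTP slot is taken; if the capacity wait
        // throws, no slot is held yet, so there is nothing to release and
        // CurrentCount is untouched.
        public static async Task AcquireAsync(
            Func<CancellationToken, Task> waitForCapacityAsync,
            SemaphoreSlim httpThrottler,
            CancellationToken ct)
        {
            await waitForCapacityAsync(ct); // may throw before any slot is held
            await httpThrottler.WaitAsync(ct);
            // from here on, a finally block owns the matching Release
        }
    }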
+
+        [TestMethod]
+        public async Task Discovery_HttpRequestAfterCapacityFails_ReleasesHttpSemaphore()
+        {
+            // Arrange - Test semaphore release when HTTP request fails after capacity is acquired
+            var httpThrottler = new SemaphoreSlim(2, 2);
+            var initialCount = httpThrottler.CurrentCount;
+
+            var mockDataHandler = new Mock();
+
+            // WaitForCapacityAsync succeeds (capacity acquired)
+            mockDataHandler
+                .Setup(x => x.WaitForCapacityAsync(It.IsAny<CancellationToken>()))
+                .Returns(Task.CompletedTask);
+
+            // HTTP request fails AFTER both capacity types are acquired
+            var mockClient = new Mock<IAmazonS3>();
+            mockClient
+                .Setup(x => x.GetObjectAsync(It.IsAny<GetObjectRequest>(), It.IsAny<CancellationToken>()))
+                .ThrowsAsync(new InvalidOperationException("Simulated S3 failure after capacity acquired"));
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(
+                mockClient.Object, request, config, mockDataHandler.Object, null, httpThrottler);
+
+            // Act & Assert
+            try
+            {
+                await coordinator.StartDownloadAsync(null, CancellationToken.None);
+                Assert.Fail("Expected InvalidOperationException to be thrown");
+            }
+            catch (InvalidOperationException ex)
+            {
+                Assert.AreEqual("Simulated S3 failure after capacity acquired", ex.Message);
+            }
+
+            // Assert - HTTP semaphore should be released by catch block in discovery
+            Assert.AreEqual(initialCount, httpThrottler.CurrentCount,
+                "HTTP semaphore should be released when HTTP request fails in discovery");
+
+            // Cleanup
+            httpThrottler.Dispose();
+        }
+
+        [TestMethod]
+        public async Task StartDownloadAsync_PrepareAsyncFails_ReleasesHttpSemaphore()
+        {
+            // Arrange - Test that HTTP semaphore is released when PrepareAsync fails after discovery
+            var httpThrottler = new SemaphoreSlim(2, 2);
+            var initialCount = httpThrottler.CurrentCount;
+
+            var mockDataHandler = new Mock();
+
+            // WaitForCapacityAsync succeeds
+            mockDataHandler
+                .Setup(x => x.WaitForCapacityAsync(It.IsAny<CancellationToken>()))
+                .Returns(Task.CompletedTask);
+
+            // ProcessPartAsync succeeds for Part 1 (discovery)
+            mockDataHandler
+                .Setup(x => x.ProcessPartAsync(1, It.IsAny<GetObjectResponse>(), It.IsAny<CancellationToken>()))
+                .Returns(Task.CompletedTask);
+
+            // PrepareAsync FAILS (this happens after Part 1 processing in StartDownloadAsync)
+            mockDataHandler
+                .Setup(x => x.PrepareAsync(It.IsAny(), It.IsAny()))
+                .ThrowsAsync(new InvalidOperationException("Simulated prepare failure"));
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart(
+                2, 8 * 1024 * 1024, 16 * 1024 * 1024, "test-etag", usePartStrategy: true);
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.PART);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(
+                mockClient.Object, request, config, mockDataHandler.Object, null, httpThrottler);
+
+            // Act & Assert
+            try
+            {
+                await coordinator.StartDownloadAsync(null, CancellationToken.None);
+                Assert.Fail("Expected InvalidOperationException to be thrown");
+            }
+            catch (InvalidOperationException ex)
+            {
+                Assert.AreEqual("Simulated prepare failure", ex.Message);
+            }
+
+            // Assert - HTTP semaphore should be released even when PrepareAsync fails
+            Assert.AreEqual(initialCount, httpThrottler.CurrentCount,
+                "HTTP semaphore should be released when PrepareAsync fails");
+
+            // Cleanup
+            httpThrottler.Dispose();
+        }
+
+        [TestMethod]
+        public async Task StartDownloadAsync_BackgroundPartHttpFails_ReleasesHttpSemaphore()
+        {
+            // Arrange - Test that HTTP semaphore is released when background part HTTP request fails
+            var httpThrottler = new SemaphoreSlim(2, 2);
+            var initialCount = httpThrottler.CurrentCount;
+
+            var mockDataHandler = new Mock();
+
+            // Capacity checks succeed
+            mockDataHandler
+                .Setup(x => x.WaitForCapacityAsync(It.IsAny<CancellationToken>()))
+                .Returns(Task.CompletedTask);
+
+            // Part 1 processing succeeds
+            mockDataHandler
+                .Setup(x => x.ProcessPartAsync(1, It.IsAny<GetObjectResponse>(), It.IsAny<CancellationToken>()))
+                .Returns(Task.CompletedTask);
+
+            mockDataHandler.Setup(x => x.OnDownloadComplete(It.IsAny()));
+
+            // HTTP client: Part 1 succeeds, Part 2 HTTP request FAILS
+            var callCount = 0;
+            var mockClient = new Mock<IAmazonS3>();
+            mockClient
+                .Setup(x => x.GetObjectAsync(It.IsAny<GetObjectRequest>(), It.IsAny<CancellationToken>()))
+                .Returns(() =>
+                {
+                    callCount++;
+                    if (callCount == 1)
+                    {
+                        // Part 1 discovery succeeds
+                        return Task.FromResult(MultipartDownloadTestHelpers.CreateMultipartFirstPartResponse(
+                            8 * 1024 * 1024, 2, 16 * 1024 * 1024, "test-etag"));
+                    }
+                    // Part 2 HTTP request fails
+                    throw new AmazonS3Exception("Simulated S3 HTTP failure");
+                });
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.PART);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1);
+            var coordinator = new MultipartDownloadManager(
+                mockClient.Object, request, config, mockDataHandler.Object, null, httpThrottler);
+
+            // Act
+            await coordinator.StartDownloadAsync(null, CancellationToken.None);
+
+            // Wait for background task to complete with failure
+            try
+            {
+                await coordinator.DownloadCompletionTask;
+            }
+            catch (AmazonS3Exception)
+            {
+                // Expected
+            }
+
+            // Assert - HTTP semaphore should be fully released after background failure
+            Assert.AreEqual(initialCount, httpThrottler.CurrentCount,
+                "HTTP semaphore should be released when background part HTTP request fails");
+
+            // Cleanup
+            httpThrottler.Dispose();
+        }
+
+        [TestMethod]
+        public async Task StartDownloadAsync_Part1ProcessingFails_ReleasesHttpSemaphore()
+        {
+            // Arrange - Test that HTTP semaphore is released when Part 1 processing fails during discovery
+            var httpThrottler = new SemaphoreSlim(2, 2);
+            var initialCount = httpThrottler.CurrentCount;
+
+            var mockDataHandler = new Mock();
+
+            // WaitForCapacityAsync succeeds
+            mockDataHandler
+                .Setup(x => x.WaitForCapacityAsync(It.IsAny<CancellationToken>()))
+                .Returns(Task.CompletedTask);
+
+            // Part 1 ProcessPartAsync FAILS (during discovery phase of StartDownloadAsync)
+            mockDataHandler
+                .Setup(x => x.ProcessPartAsync(1, It.IsAny<GetObjectResponse>(), It.IsAny<CancellationToken>()))
+                .ThrowsAsync(new InvalidOperationException("Simulated Part 1 processing failure"));
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart(
+                2, 8 * 1024 * 1024, 16 * 1024 * 1024, "test-etag", usePartStrategy: true);
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.PART);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(
+                mockClient.Object, request, config, mockDataHandler.Object, null, httpThrottler);
+
+            // Act & Assert
+            try
+            {
+                await coordinator.StartDownloadAsync(null, CancellationToken.None);
+                Assert.Fail("Expected InvalidOperationException to be thrown");
+            }
+            catch (InvalidOperationException ex)
+            {
+                Assert.AreEqual("Simulated Part 1 processing failure", ex.Message);
+            }
+
+            // Assert - HTTP semaphore should be released when Part 1 processing fails
+            Assert.AreEqual(initialCount, httpThrottler.CurrentCount,
+                "HTTP semaphore should be released when Part 1 processing fails during discovery");
+
+            // Cleanup
+            httpThrottler.Dispose();
+        }
+
+        [TestMethod]
+        public async Task StartDownloadAsync_BackgroundPartProcessingFails_ReleasesHttpSemaphore()
+        {
+            // Arrange - Test that HTTP semaphore is released when background part processing fails
+            var httpThrottler = new SemaphoreSlim(2, 2);
+            var initialCount = httpThrottler.CurrentCount;
+
+            var mockDataHandler = new Mock();
+
+            // Capacity checks succeed
+            mockDataHandler
+                .Setup(x => x.WaitForCapacityAsync(It.IsAny<CancellationToken>()))
+                .Returns(Task.CompletedTask);
+
+            // Part 1 processing succeeds, Part 2 processing FAILS
+            mockDataHandler
+                .Setup(x => x.ProcessPartAsync(It.IsAny<int>(), It.IsAny<GetObjectResponse>(), It.IsAny<CancellationToken>()))
+                .Returns<int, GetObjectResponse, CancellationToken>((partNum, response, ct) =>
+                {
+                    if (partNum == 1)
+                    {
+                        return Task.CompletedTask; // Part 1 succeeds
+                    }
+                    throw new InvalidOperationException("Simulated Part 2 processing failure");
+                });
+
+            mockDataHandler.Setup(x => x.OnDownloadComplete(It.IsAny()));
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart(
+                2, 8 * 1024 * 1024, 16 * 1024 * 1024, "test-etag", usePartStrategy: true);
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.PART);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1);
+            var coordinator = new MultipartDownloadManager(
+                mockClient.Object, request, config, mockDataHandler.Object, null, httpThrottler);
+
+            // Act
+            await coordinator.StartDownloadAsync(null, CancellationToken.None);
+
+            // Wait for background task to complete with failure
+            try
+            {
+                await coordinator.DownloadCompletionTask;
+            }
+            catch (InvalidOperationException)
+            {
+                // Expected
+            }
+
+            // Assert - HTTP semaphore should be fully released after background failure
+            Assert.AreEqual(initialCount, httpThrottler.CurrentCount,
+                "HTTP semaphore should be released when background part processing fails");
+
+            // Cleanup
+            httpThrottler.Dispose();
+        }
+
+        #endregion
+
+        #region ContentRange and Part Range Calculation Tests
+
+        [TestMethod]
+        public void ParseContentRange_ValidFormat_ReturnsCorrectValues()
+        {
+            // Arrange
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client();
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+            var contentRange = "bytes 0-8388607/52428800";
+
+            // Act
+            var (startByte, endByte, totalSize) = coordinator.ParseContentRange(contentRange);
+
+            // Assert
+            Assert.AreEqual(0L, startByte);
+            Assert.AreEqual(8388607L, endByte);
+            Assert.AreEqual(52428800L, totalSize);
+        }
+
+        [TestMethod]
+        public void ParseContentRange_SingleByteRange_ReturnsCorrectValues()
+        {
+            // Arrange
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client();
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+            var contentRange = "bytes 0-0/1";
+
+            // Act
+            var (startByte, endByte, totalSize) = coordinator.ParseContentRange(contentRange);
+
+            // Assert
+            Assert.AreEqual(0L, startByte);
+            Assert.AreEqual(0L, endByte);
+            Assert.AreEqual(1L, totalSize);
+        }
+
+        [TestMethod]
+        public void ParseContentRange_LargeFileLastPart_ReturnsCorrectValues()
+        {
+            // Arrange
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client();
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+            var contentRange = "bytes 50331648-52428799/52428800";
+
+            // Act
+            var (startByte, endByte, totalSize) = coordinator.ParseContentRange(contentRange);
+
+            // Assert
+            Assert.AreEqual(50331648L, startByte);
+            Assert.AreEqual(52428799L, endByte);
+            Assert.AreEqual(52428800L, totalSize);
+        }
+
+        [TestMethod]
+        [ExpectedException(typeof(InvalidOperationException))]
+        public void ParseContentRange_NullContentRange_ThrowsInvalidOperationException()
+        {
+            // Arrange
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client();
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            // Act
+            coordinator.ParseContentRange(null);
+        }
+
+        [TestMethod]
+        [ExpectedException(typeof(InvalidOperationException))]
+        public void ParseContentRange_EmptyContentRange_ThrowsInvalidOperationException()
+        {
+            // Arrange
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client();
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            // Act
+            coordinator.ParseContentRange(string.Empty);
+        }
+
+        [TestMethod]
+        [ExpectedException(typeof(InvalidOperationException))]
+        public void ParseContentRange_InvalidFormat_NoSlash_ThrowsInvalidOperationException()
+        {
+            // Arrange
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client();
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            // Act
+            coordinator.ParseContentRange("bytes 0-1000");
+        }
+
+        [TestMethod]
+        [ExpectedException(typeof(InvalidOperationException))]
+        public void ParseContentRange_InvalidFormat_NoDash_ThrowsInvalidOperationException()
+        {
+            // Arrange
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client();
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            // Act
+            coordinator.ParseContentRange("bytes 01000/5000");
+        }
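Read together, these cases describe a parser roughly like the sketch below: it accepts "bytes <start>-<end>/<total>" and rejects malformed ranges and wildcard totals. This is an illustration consistent with the assertions, not the SDK's implementation.

    using System;

    static class ContentRangeSketch
    {
        public static (long Start, long End, long Total) ParseContentRange(string value)
        {
            if (string.IsNullOrEmpty(value))
                throw new InvalidOperationException("Missing Content-Range header.");

            // e.g. "bytes 0-8388607/52428800"
            var payload = value.StartsWith("bytes ") ? value.Substring(6) : value;
            var slash = payload.IndexOf('/');
            var dash = payload.IndexOf('-');
            if (slash < 0 || dash < 0 || dash > slash)
                throw new InvalidOperationException($"Invalid Content-Range: '{value}'");

            var totalText = payload.Substring(slash + 1);
            if (totalText == "*")
                throw new InvalidOperationException(
                    "Unexpected wildcard total size; S3 always returns exact object sizes.");

            if (!long.TryParse(payload.Substring(0, dash), out var start) ||
                !long.TryParse(payload.Substring(dash + 1, slash - dash - 1), out var end) ||
                !long.TryParse(totalText, out var total))
                throw new InvalidOperationException($"Invalid Content-Range: '{value}'");

            return (start, end, total);
        }
    }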
+
+        [TestMethod]
+        [ExpectedException(typeof(InvalidOperationException))]
+        public void ParseContentRange_InvalidFormat_NonNumericRange_ThrowsInvalidOperationException()
+        {
+            // Arrange
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client();
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            // Act
+            coordinator.ParseContentRange("bytes abc-def/5000");
+        }
+
+        [TestMethod]
+        [ExpectedException(typeof(InvalidOperationException))]
+        public void ParseContentRange_WildcardTotalSize_ThrowsInvalidOperationExceptionWithMessage()
+        {
+            // Arrange
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client();
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            // Act & Assert
+            try
+            {
+                coordinator.ParseContentRange("bytes 0-1000/*");
+                Assert.Fail("Expected InvalidOperationException was not thrown");
+            }
+            catch (InvalidOperationException ex)
+            {
+                Assert.IsTrue(ex.Message.Contains("Unexpected wildcard"));
+                Assert.IsTrue(ex.Message.Contains("S3 always returns exact object sizes"));
+                throw;
+            }
+        }
+
+        [TestMethod]
+        [ExpectedException(typeof(InvalidOperationException))]
+        public void ParseContentRange_NonNumericTotalSize_ThrowsInvalidOperationException()
+        {
+            // Arrange
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client();
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            // Act
+            coordinator.ParseContentRange("bytes 0-1000/abc");
+        }
+
+        [TestMethod]
+        public void ExtractTotalSizeFromContentRange_ValidFormat_ReturnsTotalSize()
+        {
+            // Arrange
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client();
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+            var contentRange = "bytes 0-8388607/52428800";
+
+            // Act
+            var totalSize = coordinator.ExtractTotalSizeFromContentRange(contentRange);
+
+            // Assert
+            Assert.AreEqual(52428800L, totalSize);
+        }
+
+        [TestMethod]
+        public void ExtractTotalSizeFromContentRange_SmallFile_ReturnsTotalSize()
+        {
+            // Arrange
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client();
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+            var contentRange = "bytes 0-999/1000";
+
+            // Act
+            var totalSize = coordinator.ExtractTotalSizeFromContentRange(contentRange);
+
+            // Assert
+            Assert.AreEqual(1000L, totalSize);
+        }
+
+        [TestMethod]
+        [ExpectedException(typeof(InvalidOperationException))]
+        public void ExtractTotalSizeFromContentRange_InvalidFormat_ThrowsInvalidOperationException()
+        {
+            // Arrange
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client();
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest();
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            // Act
+            coordinator.ExtractTotalSizeFromContentRange("invalid-format");
+        }
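The range arithmetic behind the CalculatePartRange cases below is plain fixed-size partitioning with the last part clamped to the object's end; a sketch of the calculation the assertions imply:

    using System;

    static class PartRangeSketch
    {
        // Part N covers [(N-1)*partSize, N*partSize - 1], clamped to objectSize - 1
        // for the final (possibly partial) part. End byte is inclusive.
        public static (long Start, long End) CalculatePartRange(int partNumber, long objectSize, long partSize)
        {
            var start = (partNumber - 1) * partSize;
            var end = Math.Min(start + partSize, objectSize) - 1;
            return (start, end);
        }
    }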
+ { + // Arrange + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(partSize: 8 * 1024 * 1024); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + var objectSize = 50 * 1024 * 1024; // 50MB + + // Act + var (startByte, endByte) = coordinator.CalculatePartRange(1, objectSize); + + // Assert + Assert.AreEqual(0L, startByte); + Assert.AreEqual(8 * 1024 * 1024 - 1, endByte); + } + + [TestMethod] + public void CalculatePartRange_MiddlePart_ReturnsCorrectRange() + { + // Arrange + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(partSize: 8 * 1024 * 1024); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + var objectSize = 50 * 1024 * 1024; // 50MB + + // Act + var (startByte, endByte) = coordinator.CalculatePartRange(3, objectSize); + + // Assert + Assert.AreEqual(2 * 8 * 1024 * 1024, startByte); // Part 3 starts at 16MB + Assert.AreEqual(3 * 8 * 1024 * 1024 - 1, endByte); // Part 3 ends at 24MB - 1 + } + + [TestMethod] + public void CalculatePartRange_LastPartFullSize_ReturnsCorrectRange() + { + // Arrange + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(partSize: 8 * 1024 * 1024); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + var objectSize = 48 * 1024 * 1024; // 48MB (exactly 6 parts) + + // Act + var (startByte, endByte) = coordinator.CalculatePartRange(6, objectSize); + + // Assert + Assert.AreEqual(5 * 8 * 1024 * 1024, startByte); // Part 6 starts at 40MB + Assert.AreEqual(48 * 1024 * 1024 - 1, endByte); // Part 6 ends at object end + } + + [TestMethod] + public void CalculatePartRange_LastPartPartialSize_ReturnsCorrectRange() + { + // Arrange + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(partSize: 8 * 1024 * 1024); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + var objectSize = 52428800; // 50MB (7 parts with last part partial) + + // Act + var (startByte, endByte) = coordinator.CalculatePartRange(7, objectSize); + + // Assert + Assert.AreEqual(6 * 8 * 1024 * 1024, startByte); // Part 7 starts at 48MB + Assert.AreEqual(52428800 - 1, endByte); // Part 7 ends at object end (partial part) + } + + [TestMethod] + public void CalculatePartRange_SmallObject_SinglePart_ReturnsCorrectRange() + { + // Arrange + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(partSize: 8 * 1024 * 1024); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + var objectSize = 1024; // 1KB + + // Act + var (startByte, endByte) = 
coordinator.CalculatePartRange(1, objectSize); + + // Assert + Assert.AreEqual(0L, startByte); + Assert.AreEqual(1023L, endByte); // 1KB - 1 + } + + [TestMethod] + public void ValidateContentRange_RangeStrategy_ValidRange_DoesNotThrow() + { + // Arrange + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + partSize: 8 * 1024 * 1024, + downloadType: MultipartDownloadType.RANGE); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + var response = new GetObjectResponse + { + ContentRange = "bytes 0-8388607/52428800" + }; + var objectSize = 52428800L; + + // Act - should not throw + coordinator.ValidateContentRange(response, 1, objectSize); + } + + [TestMethod] + public void ValidateContentRange_RangeStrategy_MiddlePart_ValidRange_DoesNotThrow() + { + // Arrange + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + partSize: 8 * 1024 * 1024, + downloadType: MultipartDownloadType.RANGE); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + var response = new GetObjectResponse + { + ContentRange = "bytes 16777216-25165823/52428800" + }; + var objectSize = 52428800L; + + // Act - should not throw + coordinator.ValidateContentRange(response, 3, objectSize); + } + + [TestMethod] + public void ValidateContentRange_PartStrategy_DoesNotValidate() + { + // Arrange - PART strategy should skip validation + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: MultipartDownloadType.PART); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + var response = new GetObjectResponse + { + ContentRange = "bytes 0-8388607/52428800" // Valid range + }; + var objectSize = 52428800L; + + // Act - should not throw and should not validate + coordinator.ValidateContentRange(response, 1, objectSize); + } + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public void ValidateContentRange_RangeStrategy_MissingContentRange_ThrowsInvalidOperationException() + { + // Arrange + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + partSize: 8 * 1024 * 1024, + downloadType: MultipartDownloadType.RANGE); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + var response = new GetObjectResponse + { + ContentRange = null + }; + var objectSize = 52428800L; + + // Act + coordinator.ValidateContentRange(response, 1, objectSize); + } + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public void ValidateContentRange_RangeStrategy_EmptyContentRange_ThrowsInvalidOperationException() + { + // Arrange + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( 
+ partSize: 8 * 1024 * 1024, + downloadType: MultipartDownloadType.RANGE); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + var response = new GetObjectResponse + { + ContentRange = string.Empty + }; + var objectSize = 52428800L; + + // Act + coordinator.ValidateContentRange(response, 1, objectSize); + } + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public void ValidateContentRange_RangeStrategy_WrongStartByte_ThrowsInvalidOperationException() + { + // Arrange + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + partSize: 8 * 1024 * 1024, + downloadType: MultipartDownloadType.RANGE); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + // Expected: bytes 0-8388607, Actual: bytes 100-8388607 (wrong start) + var response = new GetObjectResponse + { + ContentRange = "bytes 100-8388607/52428800" + }; + var objectSize = 52428800L; + + // Act + coordinator.ValidateContentRange(response, 1, objectSize); + } + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public void ValidateContentRange_RangeStrategy_WrongEndByte_ThrowsInvalidOperationException() + { + // Arrange + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + partSize: 8 * 1024 * 1024, + downloadType: MultipartDownloadType.RANGE); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + // Expected: bytes 0-8388607, Actual: bytes 0-8388600 (wrong end) + var response = new GetObjectResponse + { + ContentRange = "bytes 0-8388600/52428800" + }; + var objectSize = 52428800L; + + // Act + coordinator.ValidateContentRange(response, 1, objectSize); + } + + [TestMethod] + public void ValidateContentRange_RangeStrategy_ExceptionMessage_ContainsExpectedAndActualRanges() + { + // Arrange + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client(); + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + partSize: 8 * 1024 * 1024, + downloadType: MultipartDownloadType.RANGE); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object); + + var response = new GetObjectResponse + { + ContentRange = "bytes 100-8388607/52428800" + }; + var objectSize = 52428800L; + + // Act & Assert + try + { + coordinator.ValidateContentRange(response, 1, objectSize); + Assert.Fail("Expected InvalidOperationException was not thrown"); + } + catch (InvalidOperationException ex) + { + Assert.IsTrue(ex.Message.Contains("ContentRange mismatch")); + Assert.IsTrue(ex.Message.Contains("Expected: bytes 0-8388607")); + Assert.IsTrue(ex.Message.Contains("Actual: bytes 100-8388607")); + } + } + + #endregion + + #region Progress Callback Tests + + [TestMethod] + public async Task ProgressCallback_ConcurrentCompletion_FiresOnlyOneCompletionEvent() + { + // Arrange - Simulate 3 parts completing simultaneously + var totalParts = 3; + var partSize 
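+
+        // Editorial sketch (not part of this change): the CalculatePartRange assertions
+        // above all reduce to one piece of arithmetic. "PartRangeSketch" is a hypothetical
+        // name; the production logic lives in MultipartDownloadManager.CalculatePartRange.
+        private static (long StartByte, long EndByte) PartRangeSketch(int partNumber, long partSize, long objectSize)
+        {
+            // Parts are 1-based; every part except possibly the last is exactly partSize bytes.
+            long startByte = (partNumber - 1) * partSize;
+            // The last part is clamped to the end of the object, so it may be partial.
+            long endByte = Math.Min(partNumber * partSize, objectSize) - 1;
+            return (startByte, endByte);
+        }
+        // E.g. PartRangeSketch(7, 8 * 1024 * 1024, 52428800) yields (50331648, 52428799),
+        // matching CalculatePartRange_LastPartPartialSize_ReturnsCorrectRange above.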
+
+        #region Progress Callback Tests
+
+        [TestMethod]
+        public async Task ProgressCallback_ConcurrentCompletion_FiresOnlyOneCompletionEvent()
+        {
+            // Arrange - Simulate 3 parts completing simultaneously
+            var totalParts = 3;
+            var partSize = 8 * 1024 * 1024;
+            var totalObjectSize = totalParts * partSize;
+
+            // Track all progress events
+            var progressEvents = new List();
+            var progressLock = new object();
+
+            EventHandler progressCallback = (sender, args) =>
+            {
+                lock (progressLock)
+                {
+                    progressEvents.Add(args);
+                }
+            };
+
+            // Create mock responses that simulate concurrent completion
+            var firstPartResponse = MultipartDownloadTestHelpers.CreateMultipartFirstPartResponse(
+                partSize, totalParts, totalObjectSize, "test-etag");
+
+            var secondPartResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse(
+                partSize, totalParts,
+                $"bytes {partSize}-{2 * partSize - 1}/{totalObjectSize}",
+                "test-etag");
+
+            var thirdPartResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse(
+                partSize, totalParts,
+                $"bytes {2 * partSize}-{totalObjectSize - 1}/{totalObjectSize}",
+                "test-etag");
+
+            int callCount = 0;
+            var mockClient = new Mock<IAmazonS3>();
+            mockClient.Setup(x => x.GetObjectAsync(It.IsAny<GetObjectRequest>(), It.IsAny<CancellationToken>()))
+                .Returns(() =>
+                {
+                    callCount++;
+                    if (callCount == 1) return Task.FromResult(firstPartResponse);
+                    if (callCount == 2) return Task.FromResult(secondPartResponse);
+                    return Task.FromResult(thirdPartResponse);
+                });
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.PART);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(
+                concurrentRequests: 3); // Allow all parts to complete simultaneously
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            // Act
+            await coordinator.StartDownloadAsync(progressCallback, CancellationToken.None);
+
+            // Wait for async progress events to complete
+            var success = await WaitForProgressEventsAsync(progressEvents, progressLock, totalObjectSize);
+            Assert.IsTrue(success, "Timed out waiting for progress events to complete");
+
+            // Assert - Verify only ONE completion event fired (IsCompleted=true)
+            lock (progressLock)
+            {
+                var completionEvents = progressEvents.Where(e => e.PercentDone == 100 && e.TransferredBytes == totalObjectSize).ToList();
+
+                // There should be at least one event at 100%
+                Assert.IsTrue(completionEvents.Count > 0, "Expected at least one progress event at 100%");
+
+                // But only ONE should have been fired with the atomic flag logic
+                // (Note: Due to the buffering and event timing, we might see multiple events at 100%,
+                // but the key is that the completion logic only fired once)
+                Assert.IsTrue(progressEvents.Count > 0, "Expected progress events to be fired");
+
+                // Verify we reached 100% completion
+                var finalEvent = progressEvents.Last();
+                Assert.AreEqual(100, finalEvent.PercentDone, "Expected final progress to be 100%");
+                Assert.AreEqual(totalObjectSize, finalEvent.TransferredBytes, "Expected all bytes transferred");
+            }
+        }
+
+        [TestMethod]
+        public async Task ProgressCallback_MultiplePartsComplete_AggregatesCorrectly()
+        {
+            // Arrange - Test progress aggregation across multiple parts
+            var totalParts = 3;
+            var partSize = 8 * 1024 * 1024;
+            var totalObjectSize = totalParts * partSize;
+
+            var progressEvents = new List();
+            var progressLock = new object();
+
+            EventHandler progressCallback = (sender, args) =>
+            {
+                lock (progressLock)
+                {
+                    progressEvents.Add(args);
+                }
+            };
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart(
+                totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true);
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.PART);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1);
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, CreateMockDataHandler().Object);
+
+            // Act
+            await coordinator.StartDownloadAsync(progressCallback, CancellationToken.None);
+
+            // Wait for async progress events to complete
+            var success = await WaitForProgressEventsAsync(progressEvents, progressLock, totalObjectSize);
+            Assert.IsTrue(success, "Timed out waiting for progress events to complete");
+
+            // Assert
+            lock (progressLock)
+            {
+                // Should have received progress events
+                Assert.IsTrue(progressEvents.Count > 0, "Expected progress events");
+
+                // Final event should show 100% completion
+                var finalEvent = progressEvents.Last();
+                Assert.AreEqual(totalObjectSize, finalEvent.TransferredBytes, "Expected all bytes transferred");
+                Assert.AreEqual(100, finalEvent.PercentDone, "Expected 100% completion");
+
+                // TransferredBytes should only increase (monotonic)
+                long lastTransferred = 0;
+                foreach (var evt in progressEvents)
+                {
+                    Assert.IsTrue(evt.TransferredBytes >= lastTransferred,
+                        "TransferredBytes should be monotonically increasing");
+                    lastTransferred = evt.TransferredBytes;
+                }
+            }
+        }
+
+        #endregion
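+
+        // Editorial sketch (not part of this change): the two progress tests above assume
+        // an aggregator that (a) only ever increases TransferredBytes and (b) reports
+        // completion exactly once even when parts finish concurrently. A minimal shape for
+        // that logic, with hypothetical names:
+        private sealed class ProgressAggregatorSketch
+        {
+            private long _transferredBytes;
+            private int _completionFired; // 0 = not fired, 1 = fired
+            private readonly long _totalBytes;
+
+            public ProgressAggregatorSketch(long totalBytes) => _totalBytes = totalBytes;
+
+            public void OnPartBytes(long count)
+            {
+                // Monotonic: concurrent adds can interleave, but the running sum never decreases.
+                var transferred = Interlocked.Add(ref _transferredBytes, count);
+                if (transferred >= _totalBytes &&
+                    Interlocked.CompareExchange(ref _completionFired, 1, 0) == 0)
+                {
+                    // Exactly one caller wins the compare-exchange, so a completion
+                    // notification raised here fires once no matter how many parts race.
+                }
+            }
+        }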
+
+        #region Cancellation Enhancement Tests
+
+        [TestMethod]
+        public async Task StartDownloadsAsync_BackgroundPartFails_CancelsInternalToken()
+        {
+            // Arrange - Deterministic test using TaskCompletionSource to control execution order
+            // This ensures Part 3 waits at synchronization point, Part 2 fails, then Part 3 checks cancellation
+            var totalParts = 3;
+            var partSize = 8 * 1024 * 1024;
+            var totalObjectSize = totalParts * partSize;
+
+            var part2Failed = false;
+            var part3SawCancellation = false;
+
+            // Synchronization primitives to control execution order
+            var part3ReachedSyncPoint = new TaskCompletionSource<bool>();
+            var part2CanFail = new TaskCompletionSource<bool>();
+            var part3CanCheckCancellation = new TaskCompletionSource<bool>();
+
+            var mockDataHandler = new Mock();
+
+            // Capacity acquisition succeeds for all parts
+            mockDataHandler
+                .Setup(x => x.WaitForCapacityAsync(It.IsAny<CancellationToken>()))
+                .Returns(Task.CompletedTask);
+
+            // PrepareAsync succeeds
+            mockDataHandler
+                .Setup(x => x.PrepareAsync(It.IsAny(), It.IsAny()))
+                .Returns(Task.CompletedTask);
+
+            // ProcessPartAsync: Controlled execution order using TaskCompletionSource
+            mockDataHandler
+                .Setup(x => x.ProcessPartAsync(It.IsAny<int>(), It.IsAny<GetObjectResponse>(), It.IsAny<CancellationToken>()))
+                .Returns<int, GetObjectResponse, CancellationToken>(async (partNum, response, ct) =>
+                {
+                    if (partNum == 1)
+                    {
+                        return; // Part 1 succeeds immediately
+                    }
+                    else if (partNum == 2)
+                    {
+                        // Part 2 waits for Part 3 to reach sync point before failing
+                        await part2CanFail.Task;
+                        part2Failed = true;
+                        throw new InvalidOperationException("Simulated Part 2 failure");
+                    }
+                    else // Part 3
+                    {
+                        // Part 3 reaches sync point and signals to Part 2
+                        part3ReachedSyncPoint.SetResult(true);
+
+                        // Wait for Part 2 to fail and cancellation to propagate
+                        await part3CanCheckCancellation.Task;
+
+                        // Now check if cancellation was received from internalCts
+                        if (ct.IsCancellationRequested)
+                        {
+                            part3SawCancellation = true;
+                            throw new OperationCanceledException("Part 3 cancelled due to Part 2 failure");
+                        }
+                    }
+                });
+
+            mockDataHandler.Setup(x => x.ReleaseCapacity());
+            mockDataHandler.Setup(x => x.OnDownloadComplete(It.IsAny<Exception>()));
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart(
+                totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true);
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.PART);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 2);
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object);
+
+            // Act - Start downloads
+            await coordinator.StartDownloadAsync(null, CancellationToken.None);
+
+            // Wait for Part 3 to reach synchronization point
+            await part3ReachedSyncPoint.Task;
+
+            // Allow Part 2 to fail
+            part2CanFail.SetResult(true);
+
+            // Give cancellation time to propagate
+            await Task.Delay(100);
+
+            // Allow Part 3 to check cancellation
+            part3CanCheckCancellation.SetResult(true);
+
+            // Wait for background task to complete
+            try
+            {
+                await coordinator.DownloadCompletionTask;
+            }
+            catch (InvalidOperationException)
+            {
+                // Expected failure from Part 2
+            }
+
+            // Assert - Deterministic verification that cancellation propagated
+            Assert.IsTrue(part2Failed, "Part 2 should have failed");
+            Assert.IsTrue(part3SawCancellation,
+                "Part 3 should have received cancellation via internalCts.Token (deterministic with TaskCompletionSource)");
+
+            Assert.IsTrue(coordinator.DownloadCompletionTask.IsFaulted,
+                "DownloadCompletionTask should be faulted when background part fails");
+        }
+
+        [TestMethod]
+        public async Task StartDownloadsAsync_MultiplePartsFail_HandlesGracefully()
+        {
+            // Arrange - Test simultaneous failures from multiple parts
+            var totalParts = 4;
+            var partSize = 8 * 1024 * 1024;
+            var totalObjectSize = totalParts * partSize;
+
+            var failedParts = new System.Collections.Concurrent.ConcurrentBag<int>();
+            var mockDataHandler = new Mock();
+
+            mockDataHandler
+                .Setup(x => x.WaitForCapacityAsync(It.IsAny<CancellationToken>()))
+                .Returns(Task.CompletedTask);
+
+            mockDataHandler
+                .Setup(x => x.PrepareAsync(It.IsAny(), It.IsAny()))
+                .Returns(Task.CompletedTask);
+
+            // Part 1 succeeds, Parts 2, 3, 4 all fail
+            mockDataHandler
+                .Setup(x => x.ProcessPartAsync(It.IsAny<int>(), It.IsAny<GetObjectResponse>(), It.IsAny<CancellationToken>()))
+                .Returns<int, GetObjectResponse, CancellationToken>((partNum, response, ct) =>
+                {
+                    if (partNum == 1)
+                    {
+                        return Task.CompletedTask;
+                    }
+
+                    failedParts.Add(partNum);
+                    throw new InvalidOperationException($"Simulated Part {partNum} failure");
+                });
+
+            mockDataHandler.Setup(x => x.ReleaseCapacity());
+            mockDataHandler.Setup(x => x.OnDownloadComplete(It.IsAny<Exception>()));
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart(
+                totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true);
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.PART);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 3);
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object);
+
+            // Act
+            await coordinator.StartDownloadAsync(null, CancellationToken.None);
+
+            try
+            {
+                await coordinator.DownloadCompletionTask;
+            }
+            catch (InvalidOperationException)
+            {
+                // Expected - at least one part failed
+            }
+
+            // Assert - Should handle multiple failures gracefully
+            Assert.IsTrue(failedParts.Count > 0, "At least one part should have failed");
+            Assert.IsTrue(coordinator.DownloadCompletionTask.IsFaulted, "DownloadCompletionTask should be faulted");
+        }
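+
+        // Editorial sketch (not part of this change): the TaskCompletionSource handshake in
+        // StartDownloadsAsync_BackgroundPartFails_CancelsInternalToken generalizes to any
+        // "B must observe A's failure" race. Names here are hypothetical:
+        private static async Task OrderedRaceSketch()
+        {
+            var aMayFail = new TaskCompletionSource<bool>();
+            var bReady = new TaskCompletionSource<bool>();
+
+            var a = Task.Run(async () => { await aMayFail.Task; throw new InvalidOperationException(); });
+            var b = Task.Run(async () => { bReady.SetResult(true); await Task.Yield(); });
+
+            await bReady.Task;        // B has reached its checkpoint...
+            aMayFail.SetResult(true); // ...and only now is A allowed to fail.
+            await b;
+            await Assert.ThrowsExceptionAsync<InvalidOperationException>(() => a);
+        }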
+
+        [TestMethod]
+        public async Task StartDownloadsAsync_CancellationRacesWithDispose_HandlesGracefully()
+        {
+            // Arrange - Test race condition between Cancel() and Dispose()
+            var totalParts = 3;
+            var partSize = 8 * 1024 * 1024;
+            var totalObjectSize = totalParts * partSize;
+
+            var objectDisposedExceptionCaught = false;
+            var mockDataHandler = new Mock();
+
+            mockDataHandler
+                .Setup(x => x.WaitForCapacityAsync(It.IsAny<CancellationToken>()))
+                .Returns(Task.CompletedTask);
+
+            mockDataHandler
+                .Setup(x => x.PrepareAsync(It.IsAny(), It.IsAny()))
+                .Returns(Task.CompletedTask);
+
+            // Part 1 succeeds, Part 2 fails triggering cancellation
+            mockDataHandler
+                .Setup(x => x.ProcessPartAsync(It.IsAny<int>(), It.IsAny<GetObjectResponse>(), It.IsAny<CancellationToken>()))
+                .Returns<int, GetObjectResponse, CancellationToken>((partNum, response, ct) =>
+                {
+                    if (partNum == 1)
+                    {
+                        return Task.CompletedTask;
+                    }
+
+                    // Part 2 failure will trigger Cancel() in catch block
+                    // The enhancement should check IsCancellationRequested to avoid ObjectDisposedException
+                    throw new InvalidOperationException("Simulated Part 2 failure");
+                });
+
+            mockDataHandler.Setup(x => x.ReleaseCapacity());
+            mockDataHandler
+                .Setup(x => x.OnDownloadComplete(It.IsAny<Exception>()))
+                .Callback<Exception>(ex =>
+                {
+                    // Check if ObjectDisposedException was handled
+                    if (ex is ObjectDisposedException)
+                    {
+                        objectDisposedExceptionCaught = true;
+                    }
+                });
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart(
+                totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true);
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.PART);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 2);
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object);
+
+            // Act
+            await coordinator.StartDownloadAsync(null, CancellationToken.None);
+
+            try
+            {
+                await coordinator.DownloadCompletionTask;
+            }
+            catch (InvalidOperationException)
+            {
+                // Expected failure
+            }
+
+            // Assert - The enhancement should prevent ObjectDisposedException from being thrown
+            // by checking IsCancellationRequested before calling Cancel()
+            Assert.IsFalse(objectDisposedExceptionCaught,
+                "ObjectDisposedException should not propagate due to IsCancellationRequested check");
+            Assert.IsTrue(coordinator.DownloadCompletionTask.IsFaulted,
+                "DownloadCompletionTask should be faulted with the original failure");
+
+            // Verify the exception type via the Task's exception
+            var aggregateException = coordinator.DownloadCompletionTask.Exception;
+            Assert.IsNotNull(aggregateException, "Task should have an exception");
+            Assert.IsInstanceOfType(aggregateException.InnerException, typeof(InvalidOperationException),
+                "Inner exception should be the original InvalidOperationException from Part 2 failure");
+        }
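+
+        // Editorial sketch (not part of this change): the race the test above guards against
+        // is Cancel() running after the CancellationTokenSource has been disposed. The
+        // defensive pattern the test expects, with a hypothetical method name:
+        private static void TryCancelSketch(CancellationTokenSource internalCts)
+        {
+            // Checking IsCancellationRequested first makes repeat failures a no-op, and the
+            // catch covers the narrow window where another thread disposes the source.
+            if (!internalCts.IsCancellationRequested)
+            {
+                try { internalCts.Cancel(); }
+                catch (ObjectDisposedException) { /* already torn down; nothing to cancel */ }
+            }
+        }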
+
+        [TestMethod]
+        public async Task StartDownloadsAsync_PartFailsDuringDownload_OriginalExceptionPropagatesFromCompletionTask()
+        {
+            // Arrange - Test that when a part fails with InvalidOperationException,
+            // the DownloadCompletionTask throws InvalidOperationException (not OperationCanceledException)
+            // This validates the WhenAllOrFirstExceptionWithFaultPriorityAsync fix
+            var totalParts = 5;
+            var partSize = 8 * 1024 * 1024;
+            var totalObjectSize = totalParts * partSize;
+
+            var mockDataHandler = new Mock();
+
+            mockDataHandler
+                .Setup(x => x.WaitForCapacityAsync(It.IsAny<CancellationToken>()))
+                .Returns(Task.CompletedTask);
+
+            mockDataHandler
+                .Setup(x => x.PrepareAsync(It.IsAny(), It.IsAny()))
+                .Returns(Task.CompletedTask);
+
+            // Parts 1 and 2 succeed; Part 3 fails with InvalidOperationException
+            mockDataHandler
+                .Setup(x => x.ProcessPartAsync(It.IsAny<int>(), It.IsAny<GetObjectResponse>(), It.IsAny<CancellationToken>()))
+                .Returns<int, GetObjectResponse, CancellationToken>((partNum, response, ct) =>
+                {
+                    if (partNum <= 2)
+                    {
+                        return Task.CompletedTask; // Parts 1-2 succeed
+                    }
+                    if (partNum == 3)
+                    {
+                        throw new InvalidOperationException("Simulated Part 3 failure");
+                    }
+                    // Parts 4-5 may or may not run depending on cancellation timing
+                    ct.ThrowIfCancellationRequested();
+                    return Task.CompletedTask;
+                });
+
+            mockDataHandler.Setup(x => x.ReleaseCapacity());
+            mockDataHandler.Setup(x => x.OnDownloadComplete(It.IsAny<Exception>()));
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart(
+                totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true);
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.PART);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 2);
+            var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object);
+
+            // Act
+            await coordinator.StartDownloadAsync(null, CancellationToken.None);
+
+            Exception caughtException = null;
+            try
+            {
+                await coordinator.DownloadCompletionTask;
+            }
+            catch (Exception ex)
+            {
+                caughtException = ex;
+            }
+
+            // Assert - The key validation: exception should be InvalidOperationException, NOT OperationCanceledException
+            // Before fix: WhenAllOrFirstExceptionAsync checked cancellation before processing faulted tasks,
+            // so OperationCanceledException would be thrown instead of the original exception
+            // After fix: WhenAllOrFirstExceptionWithFaultPriorityAsync checks for completed tasks first,
+            // ensuring the original InvalidOperationException propagates
+            Assert.IsNotNull(caughtException, "DownloadCompletionTask should throw an exception");
+            Assert.IsInstanceOfType(caughtException, typeof(InvalidOperationException),
+                "DownloadCompletionTask should throw InvalidOperationException (the original failure), " +
+                "NOT OperationCanceledException. If this fails, WhenAllOrFirstExceptionWithFaultPriorityAsync " +
+                "is not properly prioritizing faulted tasks over cancellation checks.");
+            Assert.AreEqual("Simulated Part 3 failure", caughtException.Message,
+                "The original exception message should be preserved");
+
+            // Also verify DownloadCompletionTask is faulted
+            Assert.IsTrue(coordinator.DownloadCompletionTask.IsFaulted, "DownloadCompletionTask should be faulted");
+        }
+
+        #endregion
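+
+        // Editorial sketch (not part of this change): the test above pins down the behavior
+        // its comments attribute to WhenAllOrFirstExceptionWithFaultPriorityAsync. A minimal
+        // fault-prioritizing await, with a hypothetical name:
+        private static async Task WhenAllFaultPrioritySketch(List<Task> tasks, CancellationToken ct)
+        {
+            var pending = new List<Task>(tasks);
+            while (pending.Count > 0)
+            {
+                var finished = await Task.WhenAny(pending);
+                pending.Remove(finished);
+                // Surface a real fault before honoring cancellation, so callers see the
+                // root-cause exception rather than a secondary OperationCanceledException.
+                if (finished.IsFaulted)
+                    await finished; // rethrows the original exception
+                ct.ThrowIfCancellationRequested();
+            }
+        }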
+
+        #region Semaphore and Capacity Release Tests
+
+        [TestMethod]
+        public async Task CreateDownloadTasksAsync_CancellationAfterCapacityBeforeHttpSlot_ReleasesCapacityExactlyOnce()
+        {
+            // Arrange - Test that when cancellation happens after acquiring capacity but before HTTP slot,
+            // capacity is released exactly once (not double-released)
+            var totalParts = 3;
+            var partSize = 8 * 1024 * 1024;
+            var totalObjectSize = totalParts * partSize;
+
+            var capacityReleaseCount = 0;
+            var capacityAcquireCount = 0;
+            var httpSlotAcquireCount = 0;
+
+            // Use a blocking HTTP throttler that we control
+            var httpThrottler = new SemaphoreSlim(1, 1);
+
+            // Control when Part 2 can acquire HTTP slot
+            var part2CanAcquireHttpSlot = new TaskCompletionSource<bool>();
+            var part2AcquiredCapacity = new TaskCompletionSource<bool>();
+
+            var mockDataHandler = new Mock();
+
+            // Track capacity acquisition
+            mockDataHandler
+                .Setup(x => x.WaitForCapacityAsync(It.IsAny<CancellationToken>()))
+                .Returns<CancellationToken>(ct =>
+                {
+                    var count = Interlocked.Increment(ref capacityAcquireCount);
+                    if (count == 2) // Part 2's capacity acquisition
+                    {
+                        part2AcquiredCapacity.SetResult(true);
+                        // Wait a bit to let the cancellation happen
+                        return Task.Delay(50);
+                    }
+                    return Task.CompletedTask;
+                });
+
+            // Track capacity release
+            mockDataHandler
+                .Setup(x => x.ReleaseCapacity())
+                .Callback(() =>
+                {
+                    Interlocked.Increment(ref capacityReleaseCount);
+                });
+
+            // Part 1 processing succeeds
+            mockDataHandler
+                .Setup(x => x.ProcessPartAsync(1, It.IsAny<GetObjectResponse>(), It.IsAny<CancellationToken>()))
+                .Returns(Task.CompletedTask);
+
+            mockDataHandler.Setup(x => x.OnDownloadComplete(It.IsAny<Exception>()));
+
+            // S3 client: Part 1 succeeds, Part 2 will be cancelled before HTTP request
+            var callCount = 0;
+            var mockClient = new Mock<IAmazonS3>();
+            mockClient
+                .Setup(x => x.GetObjectAsync(It.IsAny<GetObjectRequest>(), It.IsAny<CancellationToken>()))
+                .Returns<GetObjectRequest, CancellationToken>(async (req, ct) =>
+                {
+                    await Task.Yield();
+                    var count = Interlocked.Increment(ref callCount);
+                    if (count == 1)
+                    {
+                        // Part 1 discovery succeeds
+                        return MultipartDownloadTestHelpers.CreateMultipartFirstPartResponse(
+                            partSize, totalParts, totalObjectSize, "test-etag");
+                    }
+
+                    Interlocked.Increment(ref httpSlotAcquireCount);
+                    // Part 2 HTTP request - should not reach here if cancellation works
+                    throw new OperationCanceledException();
+                });
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.PART);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1);
+            var coordinator = new MultipartDownloadManager(
+                mockClient.Object, request, config, mockDataHandler.Object, null, httpThrottler);
+
+            // Act
+            var startTask = coordinator.StartDownloadAsync(null, CancellationToken.None);
+
+            // Wait for Part 1 to complete and Part 2 to acquire capacity
+            await startTask;
+
+            // The background task will cancel when Part 2 tries to acquire the HTTP slot
+            // and finds the slot is held (we're not releasing it)
+            try
+            {
+                await coordinator.DownloadCompletionTask;
+            }
+            catch (OperationCanceledException)
+            {
+                // Expected - timed out or cancelled
+            }
+            catch (Exception)
+            {
+                // Other exceptions are also acceptable for this test
+            }
+
+            // Assert - Capacity should be released exactly once per acquisition (no double-release)
+            // Part 1 capacity is released in ProcessFirstPartAsync's finally block (not ReleaseCapacity)
+            // Part 2+ capacity is released via ReleaseCapacity when cancellation or error occurs
+            Assert.IsTrue(capacityReleaseCount <= capacityAcquireCount - 1,
+                $"Capacity should not be double-released. Acquired={capacityAcquireCount}, Released={capacityReleaseCount}");
+
+            // Cleanup
+            httpThrottler.Dispose();
+        }
+
+        [TestMethod]
+        public async Task CreateDownloadTasksAsync_CancellationAfterBothAcquired_ReleasesBothExactlyOnce()
+        {
+            // Arrange - Test that when cancellation happens after acquiring both capacity and HTTP slot,
+            // both are released exactly once
+            var totalParts = 3;
+            var partSize = 8 * 1024 * 1024;
+            var totalObjectSize = totalParts * partSize;
+
+            var capacityReleaseCount = 0;
+            var capacityAcquireCount = 0;
+
+            var httpThrottler = new SemaphoreSlim(2, 2);
+            var initialHttpCount = httpThrottler.CurrentCount;
+
+            var mockDataHandler = new Mock();
+
+            // Track capacity acquisition
+            mockDataHandler
+                .Setup(x => x.WaitForCapacityAsync(It.IsAny<CancellationToken>()))
+                .Returns(() =>
+                {
+                    Interlocked.Increment(ref capacityAcquireCount);
+                    return Task.CompletedTask;
+                });
+
+            // Track capacity release
+            mockDataHandler
+                .Setup(x => x.ReleaseCapacity())
+                .Callback(() =>
+                {
+                    Interlocked.Increment(ref capacityReleaseCount);
+                });
+
+            // Part 1 processing succeeds
+            mockDataHandler
+                .Setup(x => x.ProcessPartAsync(1, It.IsAny<GetObjectResponse>(), It.IsAny<CancellationToken>()))
+                .Returns(Task.CompletedTask);
+
+            // Part 2 processing fails after both capacity and HTTP slot are acquired
+            mockDataHandler
+                .Setup(x => x.ProcessPartAsync(2, It.IsAny<GetObjectResponse>(), It.IsAny<CancellationToken>()))
+                .ThrowsAsync(new InvalidOperationException("Simulated Part 2 processing failure"));
+
+            mockDataHandler.Setup(x => x.OnDownloadComplete(It.IsAny<Exception>()));
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart(
+                totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true);
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.PART);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 2);
+            var coordinator = new MultipartDownloadManager(
+                mockClient.Object, request, config, mockDataHandler.Object, null, httpThrottler);
+
+            // Act
+            await coordinator.StartDownloadAsync(null, CancellationToken.None);
+
+            try
+            {
+                await coordinator.DownloadCompletionTask;
+            }
+            catch (InvalidOperationException)
+            {
+                // Expected failure from Part 2
+            }
+
+            // Assert - HTTP semaphore should be back to initial count (all slots released)
+            Assert.AreEqual(initialHttpCount, httpThrottler.CurrentCount,
+                $"HTTP semaphore should be fully released. Initial={initialHttpCount}, Current={httpThrottler.CurrentCount}");
+
+            // Capacity releases should match acquisitions minus Part 1 (which doesn't use ReleaseCapacity)
+            // Part 2 will release capacity in error handler
+            Assert.IsTrue(capacityReleaseCount >= 1,
+                $"At least Part 2's capacity should be released. Released={capacityReleaseCount}");
+
+            // Cleanup
+            httpThrottler.Dispose();
+        }
Released={capacityReleaseCount}"); + + // Cleanup + httpThrottler.Dispose(); + } + + [TestMethod] + public async Task CreateDownloadTasksAsync_MultiplePartsFailConcurrently_NoDoubleRelease() + { + // Arrange - Test that when multiple parts fail concurrently, no double releases occur + var totalParts = 5; + var partSize = 8 * 1024 * 1024; + var totalObjectSize = totalParts * partSize; + + var capacityReleaseCount = 0; + var capacityAcquireCount = 0; + + var httpThrottler = new SemaphoreSlim(3, 3); // Allow 3 concurrent requests + var initialHttpCount = httpThrottler.CurrentCount; + + var mockDataHandler = new Mock(); + + mockDataHandler + .Setup(x => x.WaitForCapacityAsync(It.IsAny())) + .Returns(() => + { + Interlocked.Increment(ref capacityAcquireCount); + return Task.CompletedTask; + }); + + mockDataHandler + .Setup(x => x.ReleaseCapacity()) + .Callback(() => + { + Interlocked.Increment(ref capacityReleaseCount); + }); + + // Part 1 succeeds + mockDataHandler + .Setup(x => x.ProcessPartAsync(1, It.IsAny(), It.IsAny())) + .Returns(Task.CompletedTask); + + // Parts 2, 3, 4 all fail concurrently + mockDataHandler + .Setup(x => x.ProcessPartAsync(It.IsInRange(2, 5, Moq.Range.Inclusive), It.IsAny(), It.IsAny())) + .ThrowsAsync(new InvalidOperationException("Simulated concurrent failure")); + + mockDataHandler.Setup(x => x.OnDownloadComplete(It.IsAny())); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( + totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: MultipartDownloadType.PART); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 3); + var coordinator = new MultipartDownloadManager( + mockClient.Object, request, config, mockDataHandler.Object, null, httpThrottler); + + // Act + await coordinator.StartDownloadAsync(null, CancellationToken.None); + + try + { + await coordinator.DownloadCompletionTask; + } + catch (InvalidOperationException) + { + // Expected - first failure propagates + } + + // Assert - No double releases should occur + // HTTP semaphore should be back to initial count + Assert.AreEqual(initialHttpCount, httpThrottler.CurrentCount, + $"HTTP semaphore should be fully released. Initial={initialHttpCount}, Current={httpThrottler.CurrentCount}"); + + // Capacity releases should not exceed acquisitions minus Part 1 + Assert.IsTrue(capacityReleaseCount <= capacityAcquireCount - 1, + $"Capacity should not be double-released. 
Acquired={capacityAcquireCount}, Released={capacityReleaseCount}"); + + // Cleanup + httpThrottler.Dispose(); + } + + [TestMethod] + public async Task CreateDownloadTasksAsync_CancellationDuringCapacityWait_DoesNotReleaseUnacquiredResources() + { + // Arrange - Test that when cancellation happens DURING capacity wait, + // no resources are released (since they weren't acquired) + var totalParts = 3; + var partSize = 8 * 1024 * 1024; + var totalObjectSize = totalParts * partSize; + + var capacityReleaseCount = 0; + + var httpThrottler = new SemaphoreSlim(2, 2); + var initialHttpCount = httpThrottler.CurrentCount; + + var cts = new CancellationTokenSource(); + var mockDataHandler = new Mock(); + + var callCount = 0; + mockDataHandler + .Setup(x => x.WaitForCapacityAsync(It.IsAny())) + .Returns(ct => + { + callCount++; + if (callCount == 1) + { + // Part 1 discovery succeeds + return Task.CompletedTask; + } + // Part 2 capacity wait is cancelled + cts.Cancel(); + throw new OperationCanceledException(); + }); + + mockDataHandler + .Setup(x => x.ReleaseCapacity()) + .Callback(() => + { + Interlocked.Increment(ref capacityReleaseCount); + }); + + // Part 1 processing succeeds + mockDataHandler + .Setup(x => x.ProcessPartAsync(1, It.IsAny(), It.IsAny())) + .Returns(Task.CompletedTask); + + mockDataHandler.Setup(x => x.OnDownloadComplete(It.IsAny())); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( + totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: MultipartDownloadType.PART); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1); + var coordinator = new MultipartDownloadManager( + mockClient.Object, request, config, mockDataHandler.Object, null, httpThrottler); + + // Act + await coordinator.StartDownloadAsync(null, CancellationToken.None); + + try + { + await coordinator.DownloadCompletionTask; + } + catch (OperationCanceledException) + { + // Expected + } + + // Assert - No resources should be released for Part 2 since capacity was never acquired + Assert.AreEqual(0, capacityReleaseCount, + $"No capacity should be released when cancelled during WaitForCapacityAsync. Released={capacityReleaseCount}"); + + // HTTP semaphore should still be at initial count (Part 1's slot was released normally) + Assert.AreEqual(initialHttpCount, httpThrottler.CurrentCount, + $"HTTP semaphore should be at initial count. 
Initial={initialHttpCount}, Current={httpThrottler.CurrentCount}"); + + // Cleanup + httpThrottler.Dispose(); + } + + [TestMethod] + public async Task CreateDownloadTasksAsync_SuccessfulDownload_AllResourcesReleasedProperly() + { + // Arrange - Test that on successful download, all resources are released properly + var totalParts = 4; + var partSize = 8 * 1024 * 1024; + var totalObjectSize = totalParts * partSize; + + var capacityAcquireCount = 0; + var capacityReleaseCount = 0; + + var httpThrottler = new SemaphoreSlim(2, 2); + var initialHttpCount = httpThrottler.CurrentCount; + + var mockDataHandler = new Mock(); + + mockDataHandler + .Setup(x => x.WaitForCapacityAsync(It.IsAny())) + .Returns(() => + { + Interlocked.Increment(ref capacityAcquireCount); + return Task.CompletedTask; + }); + + mockDataHandler + .Setup(x => x.ReleaseCapacity()) + .Callback(() => + { + Interlocked.Increment(ref capacityReleaseCount); + }); + + // All parts succeed + mockDataHandler + .Setup(x => x.ProcessPartAsync(It.IsAny(), It.IsAny(), It.IsAny())) + .Returns(Task.CompletedTask); + + mockDataHandler.Setup(x => x.OnDownloadComplete(It.IsAny())); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( + totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: MultipartDownloadType.PART); + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 2); + var coordinator = new MultipartDownloadManager( + mockClient.Object, request, config, mockDataHandler.Object, null, httpThrottler); + + // Act + await coordinator.StartDownloadAsync(null, CancellationToken.None); + await coordinator.DownloadCompletionTask; + + // Assert - All resources should be released properly + // HTTP semaphore should be back to initial count + Assert.AreEqual(initialHttpCount, httpThrottler.CurrentCount, + $"HTTP semaphore should be fully released after successful download. Initial={initialHttpCount}, Current={httpThrottler.CurrentCount}"); + + // Capacity is acquired for all parts but released differently: + // - Part 1: Capacity is managed by the stream (not via ReleaseCapacity) + // - Parts 2-4: Should NOT call ReleaseCapacity on success (handler manages it) + // Note: ReleaseCapacity is only called on ERROR paths in CreateDownloadTaskAsync + Assert.AreEqual(0, capacityReleaseCount, + $"ReleaseCapacity should not be called on success path (handler manages capacity). Released={capacityReleaseCount}"); + + // Verify all parts acquired capacity + Assert.AreEqual(totalParts, capacityAcquireCount, + $"All parts should have acquired capacity. 
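+
+        // Editorial sketch (not part of this change): several tests in this region repeat the
+        // same leak check — after the download settles, the throttling semaphore must be back
+        // at its initial count. As a reusable helper (hypothetical name):
+        private static void AssertSemaphoreBalancedSketch(SemaphoreSlim throttler, int initialCount)
+        {
+            // CurrentCount below initialCount means a slot was acquired and never released;
+            // above it means a double release. Both are bugs this check catches.
+            Assert.AreEqual(initialCount, throttler.CurrentCount,
+                $"Semaphore out of balance. Initial={initialCount}, Current={throttler.CurrentCount}");
+        }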
+
+        [TestMethod]
+        public async Task CreateDownloadTasksAsync_SuccessfulDownload_AllResourcesReleasedProperly()
+        {
+            // Arrange - Test that on successful download, all resources are released properly
+            var totalParts = 4;
+            var partSize = 8 * 1024 * 1024;
+            var totalObjectSize = totalParts * partSize;
+
+            var capacityAcquireCount = 0;
+            var capacityReleaseCount = 0;
+
+            var httpThrottler = new SemaphoreSlim(2, 2);
+            var initialHttpCount = httpThrottler.CurrentCount;
+
+            var mockDataHandler = new Mock();
+
+            mockDataHandler
+                .Setup(x => x.WaitForCapacityAsync(It.IsAny<CancellationToken>()))
+                .Returns(() =>
+                {
+                    Interlocked.Increment(ref capacityAcquireCount);
+                    return Task.CompletedTask;
+                });
+
+            mockDataHandler
+                .Setup(x => x.ReleaseCapacity())
+                .Callback(() =>
+                {
+                    Interlocked.Increment(ref capacityReleaseCount);
+                });
+
+            // All parts succeed
+            mockDataHandler
+                .Setup(x => x.ProcessPartAsync(It.IsAny<int>(), It.IsAny<GetObjectResponse>(), It.IsAny<CancellationToken>()))
+                .Returns(Task.CompletedTask);
+
+            mockDataHandler.Setup(x => x.OnDownloadComplete(It.IsAny<Exception>()));
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart(
+                totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true);
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.PART);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 2);
+            var coordinator = new MultipartDownloadManager(
+                mockClient.Object, request, config, mockDataHandler.Object, null, httpThrottler);
+
+            // Act
+            await coordinator.StartDownloadAsync(null, CancellationToken.None);
+            await coordinator.DownloadCompletionTask;
+
+            // Assert - All resources should be released properly
+            // HTTP semaphore should be back to initial count
+            Assert.AreEqual(initialHttpCount, httpThrottler.CurrentCount,
+                $"HTTP semaphore should be fully released after successful download. Initial={initialHttpCount}, Current={httpThrottler.CurrentCount}");
+
+            // Capacity is acquired for all parts but released differently:
+            // - Part 1: Capacity is managed by the stream (not via ReleaseCapacity)
+            // - Parts 2-4: Should NOT call ReleaseCapacity on success (handler manages it)
+            // Note: ReleaseCapacity is only called on ERROR paths in CreateDownloadTaskAsync
+            Assert.AreEqual(0, capacityReleaseCount,
+                $"ReleaseCapacity should not be called on success path (handler manages capacity). Released={capacityReleaseCount}");
+
+            // Verify all parts acquired capacity
+            Assert.AreEqual(totalParts, capacityAcquireCount,
+                $"All parts should have acquired capacity. Acquired={capacityAcquireCount}");
+
+            // Cleanup
+            httpThrottler.Dispose();
+        }
+
+        [TestMethod]
+        public async Task CreateDownloadTasksAsync_CancellationImmediatelyAfterHttpSlot_ReleasesResourcesCorrectly()
+        {
+            // Arrange - Test the specific code path where cancellation is detected
+            // immediately after acquiring HTTP slot (the second cancellation check in CreateDownloadTasksAsync)
+            var totalParts = 3;
+            var partSize = 8 * 1024 * 1024;
+            var totalObjectSize = totalParts * partSize;
+
+            var capacityReleaseCount = 0;
+
+            var httpThrottler = new SemaphoreSlim(1, 1);
+            var initialHttpCount = httpThrottler.CurrentCount;
+
+            var internalCts = new CancellationTokenSource();
+            var mockDataHandler = new Mock();
+
+            var capacityCallCount = 0;
+            mockDataHandler
+                .Setup(x => x.WaitForCapacityAsync(It.IsAny<CancellationToken>()))
+                .Returns(() =>
+                {
+                    capacityCallCount++;
+                    return Task.CompletedTask;
+                });
+
+            mockDataHandler
+                .Setup(x => x.ReleaseCapacity())
+                .Callback(() =>
+                {
+                    Interlocked.Increment(ref capacityReleaseCount);
+                });
+
+            // Part 1 succeeds
+            mockDataHandler
+                .Setup(x => x.ProcessPartAsync(1, It.IsAny<GetObjectResponse>(), It.IsAny<CancellationToken>()))
+                .Returns(Task.CompletedTask);
+
+            // Part 2 processing will trigger cancellation
+            mockDataHandler
+                .Setup(x => x.ProcessPartAsync(2, It.IsAny<GetObjectResponse>(), It.IsAny<CancellationToken>()))
+                .ThrowsAsync(new InvalidOperationException("Part 2 failure triggers cancellation"));
+
+            mockDataHandler.Setup(x => x.OnDownloadComplete(It.IsAny<Exception>()));
+
+            var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart(
+                totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true);
+
+            var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+                downloadType: MultipartDownloadType.PART);
+            var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(concurrentRequests: 1);
+            var coordinator = new MultipartDownloadManager(
+                mockClient.Object, request, config, mockDataHandler.Object, null, httpThrottler);
+
+            // Act
+            await coordinator.StartDownloadAsync(null, CancellationToken.None);
+
+            try
+            {
+                await coordinator.DownloadCompletionTask;
+            }
+            catch (InvalidOperationException)
+            {
+                // Expected
+            }
+
+            // Assert - Resources should be released correctly
+            Assert.AreEqual(initialHttpCount, httpThrottler.CurrentCount,
+                $"HTTP semaphore should be fully released. Initial={initialHttpCount}, Current={httpThrottler.CurrentCount}");
+
+            // Part 2 should have its capacity released due to error
+            Assert.IsTrue(capacityReleaseCount >= 1,
+                $"At least Part 2's capacity should be released on error. Released={capacityReleaseCount}");
+
+            // Cleanup
+            httpThrottler.Dispose();
+        }
+
+        #endregion
+    }
+}
diff --git a/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadTestHelpers.cs b/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadTestHelpers.cs
new file mode 100644
index 000000000000..c6ceb400d2c1
--- /dev/null
+++ b/sdk/test/Services/S3/UnitTests/Custom/MultipartDownloadTestHelpers.cs
@@ -0,0 +1,847 @@
+using Amazon.S3;
+using Amazon.S3.Model;
+using Amazon.S3.Transfer;
+using Amazon.S3.Transfer.Internal;
+using Moq;
+using System;
+using System.IO;
+using System.Linq;
+using System.Threading;
+using System.Threading.Tasks;
+
+namespace AWSSDK.UnitTests
+{
+    /// <summary>
+    /// Shared test utilities and helper methods for multipart download tests.
+    /// Provides mock object creation, test data generation, and common test scenarios.
+    /// </summary>
+    public static class MultipartDownloadTestHelpers
+    {
+        #region Test Constants
+
+        public const int DefaultPartSize = 8 * 1024 * 1024; // 8MB
+        public const int SmallPartSize = 5 * 1024 * 1024; // 5MB
+        public const int BufferSize = 8192; // 8KB
+        public const int DefaultConcurrentRequests = 10;
+        public const int DefaultMaxInMemoryParts = 5;
+
+        #endregion
+
+        #region GetObjectResponse Creation
+
+        /// <summary>
+        /// Creates a GetObjectResponse with configurable properties for testing.
+        /// </summary>
+        public static GetObjectResponse CreateMockGetObjectResponse(
+            long contentLength,
+            int? partsCount = null,
+            string contentRange = null,
+            string eTag = "test-etag",
+            byte[] testData = null,
+            bool includeHeaders = true)
+        {
+            return CreateMockGetObjectResponseWithEncryption(
+                contentLength,
+                partsCount,
+                contentRange,
+                eTag,
+                testData,
+                includeHeaders,
+                ServerSideEncryptionMethod.AES256,
+                null);
+        }
+
+        /// <summary>
+        /// Creates a GetObjectResponse with configurable properties including encryption settings.
+        /// </summary>
+        public static GetObjectResponse CreateMockGetObjectResponseWithEncryption(
+            long contentLength,
+            int? partsCount,
+            string contentRange,
+            string eTag,
+            byte[] testData,
+            bool includeHeaders,
+            ServerSideEncryptionMethod serverSideEncryptionMethod,
+            string serverSideEncryptionKeyManagementServiceKeyId)
+        {
+            var response = new GetObjectResponse();
+
+            // Set ContentLength
+            response.ContentLength = contentLength;
+
+            // Set ETag
+            response.ETag = eTag;
+
+            // PartsCount (for multipart uploads)
+            if (partsCount.HasValue)
+            {
+                response.PartsCount = partsCount.Value;
+            }
+
+            // ContentRange (for range requests)
+            if (contentRange != null)
+            {
+                response.ContentRange = contentRange;
+            }
+
+            // ResponseStream with test data
+            if (testData == null)
+            {
+                testData = GenerateTestData((int)contentLength, 0);
+            }
+            response.ResponseStream = new MemoryStream(testData);
+
+            // Headers
+            if (includeHeaders)
+            {
+                response.Headers["x-amz-server-side-encryption"] = "AES256";
+            }
+
+            // Server-side encryption
+            response.ServerSideEncryptionMethod = serverSideEncryptionMethod;
+
+            // KMS key ID (if provided)
+            if (!string.IsNullOrEmpty(serverSideEncryptionKeyManagementServiceKeyId))
+            {
+                response.ServerSideEncryptionKeyManagementServiceKeyId = serverSideEncryptionKeyManagementServiceKeyId;
+            }
+
+            return response;
+        }
+
+        /// <summary>
+        /// Creates a GetObjectResponse for a single-part download scenario.
+        /// </summary>
+        public static GetObjectResponse CreateSinglePartResponse(
+            long objectSize,
+            string eTag = "single-part-etag")
+        {
+            return CreateMockGetObjectResponse(
+                contentLength: objectSize,
+                partsCount: null, // No PartsCount indicates single part
+                contentRange: null,
+                eTag: eTag);
+        }
+
+        /// <summary>
+        /// Creates a GetObjectResponse for the first part of a multipart download (PART strategy).
+        /// </summary>
+        public static GetObjectResponse CreateMultipartFirstPartResponse(
+            long partSize,
+            int totalParts,
+            long totalObjectSize,
+            string eTag = "multipart-etag")
+        {
+            // ContentRange format: "bytes 0-{partSize-1}/{totalObjectSize}"
+            var contentRange = $"bytes 0-{partSize - 1}/{totalObjectSize}";
+
+            return CreateMockGetObjectResponse(
+                contentLength: partSize,
+                partsCount: totalParts,
+                contentRange: contentRange,
+                eTag: eTag);
+        }
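+
+        // Editorial worked example (not part of this change): for an 8 MiB part size and a
+        // 50 MiB object, the headers these factories emit look like:
+        //   part 1: "bytes 0-8388607/52428800"         (PartsCount = 7 on the PART strategy)
+        //   part 7: "bytes 50331648-52428799/52428800" (last part is only 2 MiB)
+        // A sketch of the string construction, mirroring CreateMultipartFirstPartResponse:
+        private static string ContentRangeSketch(long startByte, long endByte, long totalSize)
+        {
+            // RFC 9110 Content-Range for a satisfied byte range: "bytes first-last/complete".
+            return $"bytes {startByte}-{endByte}/{totalSize}";
+        }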
+
+        /// <summary>
+        /// Creates a GetObjectResponse for a range request (RANGE strategy).
+        /// </summary>
+        public static GetObjectResponse CreateRangeResponse(
+            long rangeStart,
+            long rangeEnd,
+            long totalObjectSize,
+            string eTag = "range-etag")
+        {
+            var rangeSize = rangeEnd - rangeStart + 1;
+            var contentRange = $"bytes {rangeStart}-{rangeEnd}/{totalObjectSize}";
+
+            return CreateMockGetObjectResponse(
+                contentLength: rangeSize,
+                partsCount: null,
+                contentRange: contentRange,
+                eTag: eTag);
+        }
+
+        #endregion
+
+        #region Mock S3 Client Creation
+
+        /// <summary>
+        /// Creates a mock S3 client with configurable GetObjectAsync behavior.
+        /// </summary>
+        public static Mock<IAmazonS3> CreateMockS3Client(
+            Func<GetObjectRequest, CancellationToken, Task<GetObjectResponse>> getObjectBehavior = null)
+        {
+            var mockClient = new Mock<IAmazonS3>();
+
+            if (getObjectBehavior != null)
+            {
+                mockClient
+                    .Setup(x => x.GetObjectAsync(It.IsAny<GetObjectRequest>(), It.IsAny<CancellationToken>()))
+                    .Returns(getObjectBehavior);
+            }
+
+            // Setup Config property - BufferSize is not virtual, so set directly
+            var mockConfig = new Mock<AmazonS3Config>();
+            mockConfig.Object.BufferSize = BufferSize;
+            mockClient.Setup(x => x.Config).Returns(mockConfig.Object);
+
+            return mockClient;
+        }
+
+        /// <summary>
+        /// Creates a mock S3 client that returns responses for multiple parts in sequence.
+        /// </summary>
+        public static Mock<IAmazonS3> CreateMockS3ClientForMultipart(
+            int totalParts,
+            long partSize,
+            long totalObjectSize,
+            string eTag = "multipart-etag",
+            bool usePartStrategy = true)
+        {
+            var partResponses = new GetObjectResponse[totalParts];
+
+            for (int i = 0; i < totalParts; i++)
+            {
+                int partNumber = i + 1;
+                long actualPartSize = (partNumber == totalParts)
+                    ? totalObjectSize - (partSize * (totalParts - 1)) // Last part may be smaller
+                    : partSize;
+
+                GetObjectResponse response;
+
+                if (usePartStrategy)
+                {
+                    // PART strategy: First part has PartsCount
+                    if (partNumber == 1)
+                    {
+                        response = CreateMultipartFirstPartResponse(actualPartSize, totalParts, totalObjectSize, eTag);
+                    }
+                    else
+                    {
+                        var contentRange = $"bytes {(partNumber - 1) * partSize}-{(partNumber - 1) * partSize + actualPartSize - 1}/{totalObjectSize}";
+                        response = CreateMockGetObjectResponse(actualPartSize, totalParts, contentRange, eTag);
+                    }
+                }
+                else
+                {
+                    // RANGE strategy: Use byte ranges
+                    long rangeStart = (partNumber - 1) * partSize;
+                    long rangeEnd = rangeStart + actualPartSize - 1;
+                    response = CreateRangeResponse(rangeStart, rangeEnd, totalObjectSize, eTag);
+                }
+
+                partResponses[i] = response;
+            }
+
+            var callCount = 0;
+            return CreateMockS3Client((request, ct) =>
+            {
+                var responseIndex = Interlocked.Increment(ref callCount) - 1;
+                if (responseIndex >= partResponses.Length)
+                    throw new InvalidOperationException($"Unexpected GetObjectAsync call #{responseIndex + 1}");
+
+                return Task.FromResult(partResponses[responseIndex]);
+            });
+        }
+
+        #endregion
+
+        #region Test Data Generation
+
+        /// <summary>
+        /// Generates predictable test data with a repeating pattern for verification.
+        /// </summary>
+        public static byte[] GenerateTestData(int size, int seed = 0)
+        {
+            return Enumerable.Range(seed, size).Select(i => (byte)(i % 256)).ToArray();
+        }
+
+        /// <summary>
+        /// Generates test data with a part-specific pattern (all bytes set to part number).
+        /// </summary>
+        public static byte[] GeneratePartSpecificData(int size, int partNumber)
+        {
+            return Enumerable.Repeat((byte)(partNumber % 256), size).ToArray();
+        }
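+
+        // Editorial usage note (not part of this change): a typical test wires these helpers
+        // together roughly like this. CreateMockDataHandler is the test class's own helper and
+        // is shown here only for shape:
+        //
+        //   var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart(
+        //       totalParts: 3, partSize: 8 * 1024 * 1024, totalObjectSize: 24 * 1024 * 1024);
+        //   var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(
+        //       downloadType: MultipartDownloadType.PART);
+        //   var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration();
+        //   var coordinator = new MultipartDownloadManager(
+        //       mockClient.Object, request, config, CreateMockDataHandler().Object);
+        //   await coordinator.StartDownloadAsync(null, CancellationToken.None);
+        //   await coordinator.DownloadCompletionTask;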
+ /// + public static byte[] CreateMixedPattern(int size, int seed) + { + var random = new Random(seed); + var data = new byte[size]; + + // Create a pattern with different regions + for (int i = 0; i < size; i++) + { + if (i < size / 3) + data[i] = (byte)(i % 256); // Sequential + else if (i < 2 * size / 3) + data[i] = (byte)random.Next(256); // Random + else + data[i] = (byte)((size - i) % 256); // Reverse sequential + } + + return data; + } + + /// + /// Verifies that two byte arrays are identical. + /// + public static bool VerifyDataMatch(byte[] expected, byte[] actual, int offset, int count) + { + if (actual == null || expected == null) + return false; + + if (offset + count > actual.Length || count > expected.Length) + return false; + + for (int i = 0; i < count; i++) + { + if (actual[offset + i] != expected[i]) + return false; + } + + return true; + } + + #endregion + + #region BufferedDownloadConfiguration Creation + + /// + /// Creates a default BufferedDownloadConfiguration for testing. + /// + internal static BufferedDownloadConfiguration CreateBufferedDownloadConfiguration( + int concurrentRequests = DefaultConcurrentRequests, + int maxInMemoryParts = DefaultMaxInMemoryParts, + int bufferSize = BufferSize, + long partSize = DefaultPartSize) + { + return new BufferedDownloadConfiguration( + concurrentRequests, + maxInMemoryParts, + bufferSize, + partSize); + } + + /// + /// Creates a BufferedDownloadConfiguration with minimal settings for testing. + /// + internal static BufferedDownloadConfiguration CreateMinimalBufferedDownloadConfiguration() + { + return new BufferedDownloadConfiguration(1, 1, 1024, 8 * 1024 * 1024); + } + + #endregion + + #region Mock Request Creation + + /// + /// Creates a mock TransferUtilityOpenStreamRequest for testing. + /// + public static TransferUtilityOpenStreamRequest CreateOpenStreamRequest( + string bucketName = "test-bucket", + string key = "test-key", + long? partSize = null, + MultipartDownloadType downloadType = MultipartDownloadType.PART, + int? maxInMemoryParts = null) + { + var request = new TransferUtilityOpenStreamRequest + { + BucketName = bucketName, + Key = key, + MultipartDownloadType = downloadType + }; + + if (partSize.HasValue) + { + request.PartSize = partSize.Value; + } + + if (maxInMemoryParts.HasValue) + { + request.MaxInMemoryParts = maxInMemoryParts.Value; + } + + return request; + } + + #endregion + + #region Advanced Mock Creation Helpers + + /// + /// Creates a mock S3 client that returns responses sequentially. + /// + public static Mock CreateSequentialMockClient(params GetObjectResponse[] responses) + { + var callCount = 0; + return CreateMockS3Client((request, ct) => + { + var responseIndex = Interlocked.Increment(ref callCount) - 1; + if (responseIndex >= responses.Length) + throw new InvalidOperationException($"Unexpected GetObjectAsync call #{responseIndex + 1}"); + return Task.FromResult(responses[responseIndex]); + }); + } + + /// + /// Creates a mock S3 client that captures the cancellation token used. 
+ /// + public static Mock CreateMockS3ClientWithTokenCapture(Action tokenCapture) + { + var mockClient = new Mock(); + mockClient.Setup(x => x.GetObjectAsync(It.IsAny(), It.IsAny())) + .Callback((req, ct) => tokenCapture(ct)) + .ReturnsAsync(CreateSinglePartResponse(1024)); + + var mockConfig = new Mock(); + mockConfig.Object.BufferSize = BufferSize; + mockClient.Setup(x => x.Config).Returns(mockConfig.Object); + + return mockClient; + } + + /// + /// Creates a mock S3 client that throws OperationCanceledException. + /// + public static Mock CreateMockS3ClientWithCancellation() + { + var mockClient = new Mock(); + mockClient.Setup(x => x.GetObjectAsync(It.IsAny(), It.IsAny())) + .ThrowsAsync(new OperationCanceledException()); + + var mockConfig = new Mock(); + mockConfig.Object.BufferSize = BufferSize; + mockClient.Setup(x => x.Config).Returns(mockConfig.Object); + + return mockClient; + } + + #endregion + + #region Test Setup Helpers + + /// + /// Configuration for validation tests. + /// + internal class ValidationTestConfig + { + public long PartSize { get; set; } + public long TotalSize { get; set; } + public int TotalParts { get; set; } + public string ETag { get; set; } + } + + /// + /// Types of validation failures that can occur during multipart downloads. + /// + public enum ValidationFailureType + { + MissingContentRange, + InvalidContentRangeFormat, + UnparseableRange, + RangeMismatch, + ETagMismatch + } + + /// + /// Creates a mock S3 client configured to produce a specific validation failure. + /// + internal static Mock CreateMockClientWithValidationFailure(ValidationFailureType failureType) + { + var config = new ValidationTestConfig + { + PartSize = 8 * 1024 * 1024, + TotalSize = 20 * 1024 * 1024, + TotalParts = 3, + ETag = "test-etag" + }; + + GetObjectResponse firstPartResponse; + GetObjectResponse secondPartResponse; + + if (failureType == ValidationFailureType.ETagMismatch) + { + // PART strategy for ETag testing + firstPartResponse = CreateMultipartFirstPartResponse( + config.PartSize, config.TotalParts, config.TotalSize, config.ETag); + secondPartResponse = CreateMockGetObjectResponse( + config.PartSize, config.TotalParts, + $"bytes {config.PartSize}-{2 * config.PartSize - 1}/{config.TotalSize}", + "different-etag"); + } + else + { + // RANGE strategy for ContentRange validation testing + firstPartResponse = CreateRangeResponse(0, config.PartSize - 1, config.TotalSize, config.ETag); + secondPartResponse = CreateInvalidResponse(failureType, config); + } + + return CreateSequentialMockClient(firstPartResponse, secondPartResponse); + } + + /// + /// Creates an invalid GetObjectResponse based on the failure type. 
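[Editor's note — sketch only, not part of the patch.] Each ValidationFailureType above corrupts the second part of a 3-part, 20 MB download while leaving the first part valid; a test pairs the resulting mock with a single-request coordinator and asserts that the failure surfaces (the exact exception type is defined elsewhere in the patch):

    var badClient = MultipartDownloadTestHelpers.CreateMockClientWithValidationFailure(
        MultipartDownloadTestHelpers.ValidationFailureType.ETagMismatch);
    // First response: valid PART-strategy part with ETag "test-etag".
    // Second response: same shape but ETag "different-etag", which validation must reject.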
+ /// + private static GetObjectResponse CreateInvalidResponse(ValidationFailureType failureType, ValidationTestConfig config) + { + return failureType switch + { + ValidationFailureType.MissingContentRange => CreateMockGetObjectResponse( + config.PartSize, null, null, config.ETag), + ValidationFailureType.InvalidContentRangeFormat => CreateMockGetObjectResponse( + config.PartSize, null, "invalid-format-no-slash", config.ETag), + ValidationFailureType.UnparseableRange => CreateMockGetObjectResponse( + config.PartSize, null, "bytes abc-xyz/20971520", config.ETag), + ValidationFailureType.RangeMismatch => CreateMockGetObjectResponse( + config.PartSize, null, $"bytes 0-{config.PartSize - 1}/{config.TotalSize}", config.ETag), + _ => throw new ArgumentException($"Unknown failure type: {failureType}") + }; + } + + /// + /// Creates a coordinator configured for validation testing. + /// + internal static MultipartDownloadManager CreateCoordinatorForValidationTest( + IAmazonS3 client, ValidationFailureType failureType) + { + var downloadType = failureType == ValidationFailureType.ETagMismatch + ? MultipartDownloadType.PART + : MultipartDownloadType.RANGE; + + var request = CreateOpenStreamRequest( + partSize: failureType == ValidationFailureType.ETagMismatch ? null : 8 * 1024 * 1024, + downloadType: downloadType); + + var config = CreateBufferedDownloadConfiguration(concurrentRequests: 1); + + var mockDataHandler = new Mock(); + mockDataHandler.Setup(x => x.ProcessPartAsync(It.IsAny(), It.IsAny(), It.IsAny())) + .Returns(Task.CompletedTask); + mockDataHandler.Setup(x => x.WaitForCapacityAsync(It.IsAny())) + .Returns(Task.CompletedTask); + + return new MultipartDownloadManager(client, request, config, mockDataHandler.Object); + } + + /// + /// Creates a response appropriate for discovery based on download type and parameters. + /// + internal static GetObjectResponse CreateDiscoveryResponse( + MultipartDownloadType downloadType, long objectSize, int? partsCount) + { + if (downloadType == MultipartDownloadType.PART) + { + if (partsCount == null || partsCount == 1) + { + return CreateSinglePartResponse(objectSize, "single-part-etag"); + } + else + { + long partSize = objectSize / partsCount.Value; + return CreateMultipartFirstPartResponse(partSize, partsCount.Value, objectSize, "multipart-etag"); + } + } + else // RANGE + { + if (objectSize <= DefaultPartSize) + { + return CreateMockGetObjectResponse(objectSize, null, null, "small-object-etag"); + } + else + { + return CreateRangeResponse(0, DefaultPartSize - 1, objectSize, "range-etag"); + } + } + } + + /// + /// Creates a complete test setup for discovery testing. + /// + internal static (Mock, MultipartDownloadManager) CreateDiscoveryTestSetup( + MultipartDownloadType downloadType, long objectSize, int? partsCount, long? partSize = null) + { + var mockResponse = CreateDiscoveryResponse(downloadType, objectSize, partsCount); + var mockClient = CreateMockS3Client((req, ct) => Task.FromResult(mockResponse)); + var request = CreateOpenStreamRequest( + partSize: partSize ?? (downloadType == MultipartDownloadType.RANGE ? 
DefaultPartSize : (long?)null), + downloadType: downloadType); + var config = CreateBufferedDownloadConfiguration(); + + var mockDataHandler = new Mock(); + mockDataHandler.Setup(x => x.ProcessPartAsync(It.IsAny(), It.IsAny(), It.IsAny())) + .Returns(Task.CompletedTask); + mockDataHandler.Setup(x => x.WaitForCapacityAsync(It.IsAny())) + .Returns(Task.CompletedTask); + + var coordinator = new MultipartDownloadManager(mockClient.Object, request, config, mockDataHandler.Object); + + return (mockClient, coordinator); + } + + /// + /// Creates a basic mock data handler for testing. + /// + internal static Mock CreateMockDataHandler() + { + var mockHandler = new Mock(); + mockHandler.Setup(x => x.ProcessPartAsync(It.IsAny(), It.IsAny(), It.IsAny())) + .Returns(Task.CompletedTask); + mockHandler.Setup(x => x.WaitForCapacityAsync(It.IsAny())) + .Returns(Task.CompletedTask); + mockHandler.Setup(x => x.ReleaseCapacity()); + mockHandler.Setup(x => x.OnDownloadComplete(It.IsAny())); + return mockHandler; + } + + #endregion + + #region FileDownloadConfiguration Creation + + /// + /// Creates a default FileDownloadConfiguration for testing. + /// + internal static FileDownloadConfiguration CreateFileDownloadConfiguration( + int concurrentRequests = DefaultConcurrentRequests, + int bufferSize = BufferSize, + long partSize = DefaultPartSize, + string destinationPath = null) + { + destinationPath = destinationPath ?? Path.Combine(Path.GetTempPath(), $"test-download-{Guid.NewGuid()}.dat"); + return new FileDownloadConfiguration( + concurrentRequests, + bufferSize, + partSize, + destinationPath); + } + + #endregion + + #region TransferUtilityDownloadRequest Creation + + /// + /// Creates a mock TransferUtilityDownloadRequest for testing. + /// + public static TransferUtilityDownloadRequest CreateDownloadRequest( + string bucketName = "test-bucket", + string key = "test-key", + string filePath = null, + long? partSize = null) + { + filePath = filePath ?? Path.Combine(Path.GetTempPath(), $"test-download-{Guid.NewGuid()}.dat"); + + var request = new TransferUtilityDownloadRequest + { + BucketName = bucketName, + Key = key, + FilePath = filePath + }; + + if (partSize.HasValue) + { + request.PartSize = partSize.Value; + } + + return request; + } + + #endregion + + #region Temporary File Management + + /// + /// Creates a temporary file path for testing. + /// Returns path in temp directory with unique name. + /// + public static string CreateTempFilePath(string fileName = null) + { + fileName = fileName ?? $"test-download-{Guid.NewGuid()}.dat"; + return Path.Combine(Path.GetTempPath(), fileName); + } + + /// + /// Cleans up temporary files used in tests. + /// Safe to call even if files don't exist. + /// + public static void CleanupTempFiles(params string[] filePaths) + { + foreach (var filePath in filePaths) + { + if (string.IsNullOrEmpty(filePath)) + continue; + + try + { + if (File.Exists(filePath)) + { + File.Delete(filePath); + } + } + catch + { + // Best effort cleanup - don't throw + } + } + } + + /// + /// Creates a temporary directory for test files. + /// + public static string CreateTempDirectory() + { + var tempDir = Path.Combine(Path.GetTempPath(), $"S3Tests_{Guid.NewGuid()}"); + Directory.CreateDirectory(tempDir); + return tempDir; + } + + /// + /// Cleans up a temporary directory and all its contents. + /// Safe to call even if directory doesn't exist. 
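[Editor's note — usage sketch, not part of the patch.] The temp-file helpers above are intended to be paired in try/finally so a failed assertion never leaks files:

    var filePath = MultipartDownloadTestHelpers.CreateTempFilePath();
    try
    {
        var request = MultipartDownloadTestHelpers.CreateDownloadRequest(filePath: filePath);
        // ... execute the download under test, then assert on the file ...
    }
    finally
    {
        MultipartDownloadTestHelpers.CleanupTempFiles(filePath);   // safe even if the file was never created
    }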
+ /// + public static void CleanupTempDirectory(string directoryPath) + { + if (string.IsNullOrEmpty(directoryPath)) + return; + + try + { + if (Directory.Exists(directoryPath)) + { + Directory.Delete(directoryPath, recursive: true); + } + } + catch + { + // Best effort cleanup - don't throw + } + } + + #endregion + + #region File Verification + + /// + /// Verifies file contents match expected data. + /// + public static bool VerifyFileContents(string filePath, byte[] expectedData) + { + if (!File.Exists(filePath)) + return false; + + try + { + var actualData = File.ReadAllBytes(filePath); + return actualData.SequenceEqual(expectedData); + } + catch + { + return false; + } + } + + /// + /// Verifies file exists and has expected size. + /// + public static bool VerifyFileSize(string filePath, long expectedSize) + { + if (!File.Exists(filePath)) + return false; + + try + { + var fileInfo = new FileInfo(filePath); + return fileInfo.Length == expectedSize; + } + catch + { + return false; + } + } + + /// + /// Reads file contents for verification. + /// + public static byte[] ReadFileContents(string filePath) + { + if (!File.Exists(filePath)) + return null; + + try + { + return File.ReadAllBytes(filePath); + } + catch + { + return null; + } + } + + #endregion + + #region Multi-part File Writing Simulation + + /// + /// Simulates writing multiple parts to a file for testing. + /// Each part has predictable data based on part number and seed. + /// + public static void WritePartsToFile( + string filePath, + int totalParts, + long partSize, + int seed = 0) + { + using (var fileStream = new FileStream(filePath, FileMode.Create, FileAccess.Write, FileShare.None)) + { + for (int i = 0; i < totalParts; i++) + { + var partData = GenerateTestData((int)partSize, seed + i * (int)partSize); + fileStream.Write(partData, 0, partData.Length); + } + } + } + + /// + /// Verifies multi-part file contents match expected pattern. 
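[Editor's note — illustrative round trip, not part of the patch.] WritePartsToFile seeds part i with GenerateTestData(partSize, seed + i * partSize), and VerifyMultipartFileContents below recomputes exactly that sequence, so the pair can validate a simulated multi-part file end to end:

    var path = MultipartDownloadTestHelpers.CreateTempFilePath();
    try
    {
        MultipartDownloadTestHelpers.WritePartsToFile(path, totalParts: 3, partSize: 1024, seed: 7);
        Assert.IsTrue(MultipartDownloadTestHelpers.VerifyMultipartFileContents(path, 3, 1024, seed: 7));
    }
    finally
    {
        MultipartDownloadTestHelpers.CleanupTempFiles(path);
    }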
+ /// + public static bool VerifyMultipartFileContents( + string filePath, + int totalParts, + long partSize, + int seed = 0) + { + if (!File.Exists(filePath)) + return false; + + try + { + using (var fileStream = new FileStream(filePath, FileMode.Open, FileAccess.Read, FileShare.Read)) + { + for (int i = 0; i < totalParts; i++) + { + var expectedData = GenerateTestData((int)partSize, seed + i * (int)partSize); + var actualData = new byte[partSize]; + + var bytesRead = fileStream.Read(actualData, 0, (int)partSize); + if (bytesRead != partSize) + return false; + + if (!expectedData.SequenceEqual(actualData)) + return false; + } + + // Verify no extra data + return fileStream.Position == fileStream.Length; + } + } + catch + { + return false; + } + } + + #endregion + } +} diff --git a/sdk/test/Services/S3/UnitTests/Custom/MultipartUploadValidationTests.cs b/sdk/test/Services/S3/UnitTests/Custom/MultipartUploadValidationTests.cs index 19ea71304c8f..9f0a6e4dc024 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/MultipartUploadValidationTests.cs +++ b/sdk/test/Services/S3/UnitTests/Custom/MultipartUploadValidationTests.cs @@ -69,6 +69,18 @@ public async Task Validation_HappyPath() return new UploadPartResponse { PartNumber = request.PartNumber }; }); + s3Client + .Setup(x => x.CompleteMultipartUploadAsync( + It.IsAny(), + It.IsAny())) + .ReturnsAsync(new CompleteMultipartUploadResponse + { + BucketName = "test-bucket", + Key = "test", + ETag = "test-etag", + Location = "https://test-bucket.s3.amazonaws.com/test" + }); + var uploadRequest = new TransferUtilityUploadRequest { FilePath = _tempFilePath, diff --git a/sdk/test/Services/S3/UnitTests/Custom/OpenStreamWithResponseCommandTests.cs b/sdk/test/Services/S3/UnitTests/Custom/OpenStreamWithResponseCommandTests.cs new file mode 100644 index 000000000000..40156c316ac7 --- /dev/null +++ b/sdk/test/Services/S3/UnitTests/Custom/OpenStreamWithResponseCommandTests.cs @@ -0,0 +1,524 @@ +using Amazon.S3; +using Amazon.S3.Model; +using Amazon.S3.Transfer; +using Amazon.S3.Transfer.Internal; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Moq; +using System; +using System.IO; +using System.Threading; +using System.Threading.Tasks; + +namespace AWSSDK.UnitTests +{ + [TestClass] + public class OpenStreamWithResponseCommandTests + { + #region ExecuteAsync Tests - Success + + [TestMethod] + public async Task ExecuteAsync_CreatesBufferedMultipartStream() + { + // Arrange + var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse(1024, "test-etag"); + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = new TransferUtilityConfig(); + var command = new OpenStreamWithResponseCommand(mockClient.Object, request, config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.IsNotNull(response.ResponseStream); + Assert.IsInstanceOfType(response.ResponseStream, typeof(BufferedMultipartStream)); + } + + [TestMethod] + public async Task ExecuteAsync_CallsInitializeAsync() + { + // Arrange + var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse(1024, "test-etag"); + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = new TransferUtilityConfig(); + var 
command = new OpenStreamWithResponseCommand(mockClient.Object, request, config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + var stream = (BufferedMultipartStream)response.ResponseStream; + Assert.IsNotNull(stream.DiscoveryResult); // Indicates initialization occurred + } + + [TestMethod] + public async Task ExecuteAsync_ReturnsResponse() + { + // Arrange + var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse(1024, "test-etag"); + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = new TransferUtilityConfig(); + var command = new OpenStreamWithResponseCommand(mockClient.Object, request, config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.IsInstanceOfType(response, typeof(TransferUtilityOpenStreamResponse)); + } + + [TestMethod] + public async Task ExecuteAsync_SetsResponseStream() + { + // Arrange + var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse(1024, "test-etag"); + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = new TransferUtilityConfig(); + var command = new OpenStreamWithResponseCommand(mockClient.Object, request, config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response.ResponseStream); + Assert.IsTrue(response.ResponseStream.CanRead); + } + + #endregion + + #region ExecuteAsync Tests - Response Mapping + + [TestMethod] + public async Task ExecuteAsync_MapsMetadataFromInitialResponse() + { + // Arrange + var testData = MultipartDownloadTestHelpers.GenerateTestData(1024, 0); + var mockResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponseWithEncryption( + contentLength: 1024, + partsCount: null, + contentRange: null, + eTag: "test-etag-123", + testData: testData, + includeHeaders: true, + serverSideEncryptionMethod: ServerSideEncryptionMethod.AES256, + serverSideEncryptionKeyManagementServiceKeyId: "test-kms-key"); + + // Add custom headers + mockResponse.Headers["Content-Language"] = "en-US"; + mockResponse.Headers["Cache-Control"] = "max-age=3600"; + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = new TransferUtilityConfig(); + var command = new OpenStreamWithResponseCommand(mockClient.Object, request, config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual("test-etag-123", response.ETag); + Assert.IsNotNull(response.Headers); + Assert.AreEqual(ServerSideEncryptionMethod.AES256, response.ServerSideEncryptionMethod); + Assert.AreEqual("test-kms-key", response.ServerSideEncryptionKeyManagementServiceKeyId); + } + + [TestMethod] + public async Task ExecuteAsync_SinglePart_MapsFromSinglePartResponse() + { + // Arrange + var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse( + objectSize: 2048, + eTag: "single-part-etag"); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => 
Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = new TransferUtilityConfig(); + var command = new OpenStreamWithResponseCommand(mockClient.Object, request, config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual("single-part-etag", response.ETag); + } + + [TestMethod] + public async Task ExecuteAsync_Multipart_MapsFromInitialResponse() + { + // Arrange + var totalObjectSize = 50 * 1024 * 1024; + var partSize = 10 * 1024 * 1024; + var totalParts = 5; + + // Use CreateMockS3ClientForMultipart to properly mock all parts + var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( + totalParts, partSize, totalObjectSize, "multipart-etag", usePartStrategy: true); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = new TransferUtilityConfig { ConcurrentServiceRequests = 1 }; + var command = new OpenStreamWithResponseCommand(mockClient.Object, request, config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual("multipart-etag", response.ETag); + + // Cleanup + response.ResponseStream.Dispose(); + } + + #endregion + + #region ContentLength and ContentRange Validation Tests + + [TestMethod] + public async Task ExecuteAsync_SinglePart_SetsCorrectContentLengthAndRange() + { + // Arrange + var objectSize = 2048; + var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse( + objectSize: objectSize, + eTag: "single-part-etag"); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = new TransferUtilityConfig(); + var command = new OpenStreamWithResponseCommand(mockClient.Object, request, config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert - SEP Part GET Step 7 / Ranged GET Step 9 + Assert.AreEqual(objectSize, response.Headers.ContentLength, + "ContentLength should equal total object size"); + Assert.AreEqual($"bytes 0-{objectSize - 1}/{objectSize}", response.ContentRange, + "ContentRange should be bytes 0-(ContentLength-1)/ContentLength"); + + // Cleanup + response.ResponseStream.Dispose(); + } + + [TestMethod] + public async Task ExecuteAsync_MultipartPartStrategy_SetsCorrectContentLengthAndRange() + { + // Arrange + var totalParts = 5; + var partSize = 10 * 1024 * 1024; // 10MB per part + var totalObjectSize = (long)totalParts * partSize; // 50MB total + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( + totalParts, partSize, totalObjectSize, "multipart-etag", usePartStrategy: true); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + downloadType: MultipartDownloadType.PART); + var config = new TransferUtilityConfig { ConcurrentServiceRequests = 1 }; + var command = new OpenStreamWithResponseCommand(mockClient.Object, request, config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert - SEP Part GET Step 7 + Assert.AreEqual(totalObjectSize, response.Headers.ContentLength, + "ContentLength should equal total object size, not first part size"); + Assert.AreEqual($"bytes 0-{totalObjectSize - 1}/{totalObjectSize}", response.ContentRange, + "ContentRange should be bytes 
0-(ContentLength-1)/ContentLength for entire object"); + + // Cleanup + response.ResponseStream.Dispose(); + } + + [TestMethod] + public async Task ExecuteAsync_MultipartRangeStrategy_SetsCorrectContentLengthAndRange() + { + // Arrange + var totalObjectSize = 25 * 1024 * 1024; // 25MB total + var partSize = 8 * 1024 * 1024; // 8MB per part + var totalParts = (int)Math.Ceiling((double)totalObjectSize / partSize); // 4 parts + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( + totalParts, partSize, totalObjectSize, "range-multipart-etag", usePartStrategy: false); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest( + partSize: partSize, + downloadType: MultipartDownloadType.RANGE); + var config = new TransferUtilityConfig { ConcurrentServiceRequests = 1 }; + var command = new OpenStreamWithResponseCommand(mockClient.Object, request, config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert - SEP Ranged GET Step 9 + Assert.AreEqual(totalObjectSize, response.Headers.ContentLength, + "ContentLength should equal total object size, not first range size"); + Assert.AreEqual($"bytes 0-{totalObjectSize - 1}/{totalObjectSize}", response.ContentRange, + "ContentRange should be bytes 0-(ContentLength-1)/ContentLength for entire object"); + + // Cleanup + response.ResponseStream.Dispose(); + } + + [TestMethod] + public async Task ExecuteAsync_ZeroByteObject_ContentRangeIsNull() + { + // Arrange - Mock a 0-byte object + var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse( + objectSize: 0, + eTag: "empty-etag"); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = new TransferUtilityConfig(); + var command = new OpenStreamWithResponseCommand(mockClient.Object, request, config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert - S3 returns null ContentRange for 0-byte objects + Assert.AreEqual(0, response.Headers.ContentLength, + "ContentLength should be 0 for empty object"); + Assert.IsNull(response.ContentRange, + "ContentRange should be null for 0-byte objects (matching S3 behavior)"); + + // Cleanup + response.ResponseStream.Dispose(); + } + + #endregion + + #region Integration Tests + + [TestMethod] + public async Task ExecuteAsync_EndToEnd_SinglePart() + { + // Arrange + var testData = MultipartDownloadTestHelpers.GenerateTestData(512, 0); + var mockResponse = MultipartDownloadTestHelpers.CreateMockGetObjectResponse( + contentLength: 512, + partsCount: null, + contentRange: null, + eTag: "single-etag", + testData: testData); + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = new TransferUtilityConfig(); + var command = new OpenStreamWithResponseCommand(mockClient.Object, request, config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.IsNotNull(response.ResponseStream); + + // Verify we can read from the stream + var buffer = new byte[256]; + var bytesRead = await response.ResponseStream.ReadAsync(buffer, 0, buffer.Length); + Assert.AreEqual(256, bytesRead); + + // Verify data matches + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch(testData, 
buffer, 0, 256)); + + // Cleanup + response.ResponseStream.Dispose(); + } + + [TestMethod] + public async Task ExecuteAsync_EndToEnd_Multipart() + { + // Arrange + var totalParts = 3; + var partSize = 8 * 1024 * 1024; + var totalObjectSize = totalParts * partSize; + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( + totalParts, partSize, totalObjectSize, "multi-etag", usePartStrategy: true); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + var config = new TransferUtilityConfig { ConcurrentServiceRequests = 1 }; + var command = new OpenStreamWithResponseCommand(mockClient.Object, request, config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.IsNotNull(response.ResponseStream); + Assert.IsInstanceOfType(response.ResponseStream, typeof(BufferedMultipartStream)); + + var stream = (BufferedMultipartStream)response.ResponseStream; + Assert.AreEqual(totalParts, stream.DiscoveryResult.TotalParts); + Assert.AreEqual(totalObjectSize, stream.DiscoveryResult.ObjectSize); + + // Cleanup + response.ResponseStream.Dispose(); + } + + #endregion + + #region MaxInMemoryParts Tests + + [TestMethod] + public async Task ExecuteAsync_UsesRequestMaxInMemoryParts() + { + // Arrange + var customMaxParts = 256; + var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse(1024, "test-etag"); + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + request.MaxInMemoryParts = customMaxParts; + + var config = new TransferUtilityConfig(); + var command = new OpenStreamWithResponseCommand(mockClient.Object, request, config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.IsNotNull(response.ResponseStream); + // Stream should be created successfully with request's MaxInMemoryParts value + + // Cleanup + response.ResponseStream.Dispose(); + } + + [TestMethod] + public async Task ExecuteAsync_WithDefaultMaxInMemoryParts_WorksCorrectly() + { + // Arrange - Use default MaxInMemoryParts from request (1024) + var mockResponse = MultipartDownloadTestHelpers.CreateSinglePartResponse(2048, "test-etag"); + var mockClient = MultipartDownloadTestHelpers.CreateMockS3Client( + (req, ct) => Task.FromResult(mockResponse)); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + // Don't explicitly set MaxInMemoryParts - should use default of 1024 + + var config = new TransferUtilityConfig(); + var command = new OpenStreamWithResponseCommand(mockClient.Object, request, config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.IsNotNull(response.ResponseStream); + + // Should work with default value + var buffer = new byte[1024]; + var bytesRead = await response.ResponseStream.ReadAsync(buffer, 0, buffer.Length); + Assert.IsTrue(bytesRead > 0, "Should successfully read with default MaxInMemoryParts"); + + // Cleanup + response.ResponseStream.Dispose(); + } + + [DataTestMethod] + [DataRow(1, DisplayName = "Minimum (1 part)")] + [DataRow(128, DisplayName = "Small (128 parts)")] + [DataRow(1024, DisplayName = "Default (1024 parts)")] + [DataRow(2048, DisplayName = "Large (2048 parts)")] + public async Task 
ExecuteAsync_WithVariousMaxInMemoryParts_CreatesStreamSuccessfully( + int maxInMemoryParts) + { + // Arrange + var totalParts = 3; + var partSize = 8 * 1024 * 1024; + var totalObjectSize = totalParts * partSize; + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( + totalParts, partSize, totalObjectSize, "test-etag", usePartStrategy: true); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(); + request.MaxInMemoryParts = maxInMemoryParts; + + var config = new TransferUtilityConfig { ConcurrentServiceRequests = 1 }; + var command = new OpenStreamWithResponseCommand(mockClient.Object, request, config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.IsNotNull(response.ResponseStream); + Assert.IsInstanceOfType(response.ResponseStream, typeof(BufferedMultipartStream)); + + // Verify stream works + var stream = (BufferedMultipartStream)response.ResponseStream; + Assert.IsNotNull(stream.DiscoveryResult); + + // Cleanup + response.ResponseStream.Dispose(); + } + + [TestMethod] + public async Task ExecuteAsync_MultipartWithCustomMaxInMemoryParts_IntegrationTest() + { + // Arrange - Larger multipart download with custom memory limit + var customMaxParts = 64; // Lower memory limit for this test + var totalParts = 10; + var partSize = 5 * 1024 * 1024; + var totalObjectSize = totalParts * partSize; + + var mockClient = MultipartDownloadTestHelpers.CreateMockS3ClientForMultipart( + totalParts, partSize, totalObjectSize, "multipart-etag", usePartStrategy: true); + + var request = MultipartDownloadTestHelpers.CreateOpenStreamRequest(partSize: partSize); + request.MaxInMemoryParts = customMaxParts; + + var config = new TransferUtilityConfig { ConcurrentServiceRequests = 3 }; + var command = new OpenStreamWithResponseCommand(mockClient.Object, request, config); + + // Act + var response = await command.ExecuteAsync(CancellationToken.None); + + // Assert + Assert.IsNotNull(response); + Assert.IsNotNull(response.ResponseStream); + + var stream = (BufferedMultipartStream)response.ResponseStream; + Assert.AreEqual(totalParts, stream.DiscoveryResult.TotalParts); + + // Verify we can read from the stream with custom MaxInMemoryParts + var buffer = new byte[1024 * 1024]; // 1MB buffer + var bytesRead = await stream.ReadAsync(buffer, 0, buffer.Length); + Assert.IsTrue(bytesRead > 0, + $"Should successfully read multipart download with MaxInMemoryParts={customMaxParts}"); + + // Cleanup + response.ResponseStream.Dispose(); + } + + #endregion + } +} diff --git a/sdk/test/Services/S3/UnitTests/Custom/PartBufferManagerTests.cs b/sdk/test/Services/S3/UnitTests/Custom/PartBufferManagerTests.cs new file mode 100644 index 000000000000..37ab24be0fb3 --- /dev/null +++ b/sdk/test/Services/S3/UnitTests/Custom/PartBufferManagerTests.cs @@ -0,0 +1,1656 @@ +using Amazon.S3.Transfer.Internal; +using Amazon.S3.Model; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Moq; +using System; +using System.IO; +using System.Collections.Generic; +using System.Buffers; +using System.Threading; +using System.Threading.Tasks; +using System.Linq; + +namespace AWSSDK.UnitTests +{ + /// + /// Unit tests for PartBufferManager class. + /// Tests buffer management, sequential access, and cross-part boundary reading. 
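[Editor's note — not part of the patch.] A rough sanity check of what the MaxInMemoryParts cases above exercise: the bytes buffered at any moment are bounded by min(MaxInMemoryParts, totalParts) * partSize, so the custom-limit integration test (MaxInMemoryParts = 64, 10 parts of 5 MB) can hold at most 50 MB in flight even though its configured ceiling would allow 320 MB:

    static long PeakBufferBound(int maxInMemoryParts, int totalParts, long partSize)
        => Math.Min(maxInMemoryParts, totalParts) * partSize;

    // PeakBufferBound(64, 10, 5 * 1024 * 1024)  == 52,428,800  (50 MB actually possible)
    // PeakBufferBound(1024, 3, 8 * 1024 * 1024) == 25,165,824  (24 MB)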
+ /// + [TestClass] + public class PartBufferManagerTests + { + #region Constructor Tests + + [TestMethod] + public void Constructor_WithValidConfiguration_CreatesManager() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + + // Act + var manager = new PartBufferManager(config); + + // Assert + Assert.IsNotNull(manager); + Assert.AreEqual(1, manager.NextExpectedPartNumber); + + // Cleanup + manager.Dispose(); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentNullException))] + public void Constructor_WithNullConfiguration_ThrowsArgumentNullException() + { + // Act + var manager = new PartBufferManager(null); + + // Assert - ExpectedException + } + + #endregion + + #region Property Tests + + [TestMethod] + public void NextExpectedPartNumber_StartsAtOne() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + try + { + // Act & Assert + Assert.AreEqual(1, manager.NextExpectedPartNumber); + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + public async Task NextExpectedPartNumber_IncrementsAfterPartComplete() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + try + { + // Add part 1 + byte[] testBuffer = ArrayPool.Shared.Rent(512); + var partBuffer = new StreamPartBuffer(1, testBuffer, 512); + await manager.WaitForBufferSpaceAsync(CancellationToken.None); + manager.AddBuffer(partBuffer); + + // Read part 1 completely + byte[] readBuffer = new byte[512]; + await manager.ReadAsync(readBuffer, 0, 512, CancellationToken.None); + + // Act & Assert - Should advance to part 2 + Assert.AreEqual(2, manager.NextExpectedPartNumber); + } + finally + { + manager.Dispose(); + } + } + + #endregion + + #region WaitForBufferSpaceAsync Tests + + [TestMethod] + public async Task WaitForBufferSpaceAsync_InitialState_AllowsImmediateAccess() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(maxInMemoryParts: 5); + var manager = new PartBufferManager(config); + + try + { + // Act - Should complete immediately + var task = manager.WaitForBufferSpaceAsync(CancellationToken.None); + + // Assert + Assert.IsTrue(task.IsCompleted); + await task; // Should not throw + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + public async Task WaitForBufferSpaceAsync_WhenMaxPartsReached_Blocks() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(maxInMemoryParts: 2); + var manager = new PartBufferManager(config); + + try + { + // Fill up to max parts + for (int i = 1; i <= 2; i++) + { + await manager.WaitForBufferSpaceAsync(CancellationToken.None); + byte[] testBuffer = ArrayPool.Shared.Rent(512); + var partBuffer = new StreamPartBuffer(i, testBuffer, 512); + manager.AddBuffer(partBuffer); + } + + // Act - Try to wait for space (should block) + var waitTask = manager.WaitForBufferSpaceAsync(CancellationToken.None); + + // Give a small delay to ensure it would block + await Task.Delay(50); + + // Assert - Should not have completed + Assert.IsFalse(waitTask.IsCompleted); + + // Cleanup - release space to unblock + manager.ReleaseBufferSpace(); + await waitTask; + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + public async Task WaitForBufferSpaceAsync_AfterRelease_AllowsAccess() + { + // Arrange + var config = 
MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(maxInMemoryParts: 1); + var manager = new PartBufferManager(config); + + try + { + // Take the one available slot + await manager.WaitForBufferSpaceAsync(CancellationToken.None); + byte[] testBuffer = ArrayPool.Shared.Rent(512); + var partBuffer = new StreamPartBuffer(1, testBuffer, 512); + manager.AddBuffer(partBuffer); + + // Release space + manager.ReleaseBufferSpace(); + + // Act - Should be able to wait again + var waitTask = manager.WaitForBufferSpaceAsync(CancellationToken.None); + + // Assert + Assert.IsTrue(waitTask.IsCompleted); + await waitTask; + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + public async Task WaitForBufferSpaceAsync_WithCancellation_ThrowsOperationCanceledException() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(maxInMemoryParts: 1); + var manager = new PartBufferManager(config); + var cts = new CancellationTokenSource(); + + try + { + // Take the one available slot + await manager.WaitForBufferSpaceAsync(CancellationToken.None); + + // Cancel immediately + cts.Cancel(); + + // Act & Assert + // Use try-catch to accept both OperationCanceledException and TaskCanceledException + // (TaskCanceledException derives from OperationCanceledException) + try + { + await manager.WaitForBufferSpaceAsync(cts.Token); + Assert.Fail("Expected OperationCanceledException was not thrown"); + } + catch (OperationCanceledException ex) + { + // Success - accepts both OperationCanceledException and derived types like TaskCanceledException + Assert.AreEqual(cts.Token, ex.CancellationToken, "CancellationToken should match the provided token"); + } + } + finally + { + manager.Dispose(); + cts.Dispose(); + } + } + + #endregion + + #region AddBuffer Tests + + [TestMethod] + public async Task AddBuffer_CreatesBufferedDataSource() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + try + { + byte[] testBuffer = ArrayPool.Shared.Rent(512); + var partBuffer = new StreamPartBuffer(1, testBuffer, 512); + + await manager.WaitForBufferSpaceAsync(CancellationToken.None); + + // Act + manager.AddBuffer(partBuffer); + + // Assert - Should be able to read from part 1 + byte[] readBuffer = new byte[512]; + int bytesRead = await manager.ReadAsync(readBuffer, 0, 512, CancellationToken.None); + Assert.AreEqual(512, bytesRead); + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + [ExpectedException(typeof(ArgumentNullException))] + public void AddBuffer_WithNullBuffer_ThrowsArgumentNullException() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + try + { + // Act + manager.AddBuffer((IPartDataSource)null); + + // Assert - ExpectedException + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + public async Task AddBuffer_SignalsPartAvailable() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + try + { + // Start reading before part is available + var readTask = Task.Run(async () => + { + byte[] readBuffer = new byte[512]; + return await manager.ReadAsync(readBuffer, 0, 512, CancellationToken.None); + }); + + // Give read task time to start waiting + await Task.Delay(50); + + // Add the part + byte[] testBuffer = ArrayPool.Shared.Rent(512); + var 
partBuffer = new StreamPartBuffer(1, testBuffer, 512); + await manager.WaitForBufferSpaceAsync(CancellationToken.None); + manager.AddBuffer(partBuffer); + + // Assert - Read should complete + int bytesRead = await readTask; + Assert.AreEqual(512, bytesRead); + } + finally + { + manager.Dispose(); + } + } + + #endregion + + #region AddDataSource Tests + + [TestMethod] + public async Task AddDataSource_AddsToCollection() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + try + { + byte[] testBuffer = ArrayPool.Shared.Rent(512); + var partBuffer = new StreamPartBuffer(1, testBuffer, 512); + var dataSource = new BufferedDataSource(partBuffer); + + // Act + await manager.WaitForBufferSpaceAsync(CancellationToken.None); + manager.AddDataSource(dataSource); + + // Assert - Should be able to read from part 1 + byte[] readBuffer = new byte[512]; + int bytesRead = await manager.ReadAsync(readBuffer, 0, 512, CancellationToken.None); + Assert.AreEqual(512, bytesRead); + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + [ExpectedException(typeof(ArgumentNullException))] + public void AddDataSource_WithNullDataSource_ThrowsArgumentNullException() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + try + { + // Act + manager.AddDataSource(null); + + // Assert - ExpectedException + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public void AddDataSource_WithDuplicatePartNumber_ThrowsInvalidOperationException() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + try + { + // Add part 1 + byte[] testBuffer1 = ArrayPool.Shared.Rent(512); + var partBuffer1 = new StreamPartBuffer(1, testBuffer1, 512); + var dataSource1 = new BufferedDataSource(partBuffer1); + manager.AddDataSource(dataSource1); + + // Try to add duplicate part 1 + byte[] testBuffer2 = ArrayPool.Shared.Rent(512); + var partBuffer2 = new StreamPartBuffer(1, testBuffer2, 512); + var dataSource2 = new BufferedDataSource(partBuffer2); + + // Act + manager.AddDataSource(dataSource2); + + // Assert - ExpectedException + } + finally + { + manager.Dispose(); + } + } + + #endregion + + #region ReadAsync Tests - Sequential Access + + [TestMethod] + public async Task ReadAsync_ReadsDataSequentially() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + try + { + byte[] testData = MultipartDownloadTestHelpers.GenerateTestData(512, 0); + byte[] testBuffer = ArrayPool.Shared.Rent(512); + Buffer.BlockCopy(testData, 0, testBuffer, 0, 512); + + var partBuffer = new StreamPartBuffer(1, testBuffer, 512); + await manager.WaitForBufferSpaceAsync(CancellationToken.None); + manager.AddBuffer(partBuffer); + + // Act + byte[] readBuffer = new byte[512]; + int bytesRead = await manager.ReadAsync(readBuffer, 0, 512, CancellationToken.None); + + // Assert + Assert.AreEqual(512, bytesRead); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch(testData, readBuffer, 0, 512)); + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + public async Task ReadAsync_AdvancesNextExpectedPartNumber() + { + // Arrange + var config = 
MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + try + { + // Add part 1 + byte[] testBuffer = ArrayPool.Shared.Rent(512); + var partBuffer = new StreamPartBuffer(1, testBuffer, 512); + await manager.WaitForBufferSpaceAsync(CancellationToken.None); + manager.AddBuffer(partBuffer); + + // Read part 1 completely + byte[] readBuffer = new byte[512]; + await manager.ReadAsync(readBuffer, 0, 512, CancellationToken.None); + + // Assert + Assert.AreEqual(2, manager.NextExpectedPartNumber); + } + finally + { + manager.Dispose(); + } + } + + #endregion + + #region ReadAsync Tests - Parameter Validation + + [TestMethod] + [ExpectedException(typeof(ArgumentNullException))] + public async Task ReadAsync_WithNullBuffer_ThrowsArgumentNullException() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + try + { + // Act + await manager.ReadAsync(null, 0, 512, CancellationToken.None); + + // Assert - ExpectedException + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + [ExpectedException(typeof(ArgumentOutOfRangeException))] + public async Task ReadAsync_WithNegativeOffset_ThrowsArgumentOutOfRangeException() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + byte[] readBuffer = new byte[512]; + + try + { + // Act + await manager.ReadAsync(readBuffer, -1, 512, CancellationToken.None); + + // Assert - ExpectedException + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + [ExpectedException(typeof(ArgumentOutOfRangeException))] + public async Task ReadAsync_WithNegativeCount_ThrowsArgumentOutOfRangeException() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + byte[] readBuffer = new byte[512]; + + try + { + // Act + await manager.ReadAsync(readBuffer, 0, -1, CancellationToken.None); + + // Assert - ExpectedException + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + [ExpectedException(typeof(ArgumentException))] + public async Task ReadAsync_WithOffsetCountExceedingBounds_ThrowsArgumentException() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + byte[] readBuffer = new byte[512]; + + try + { + // Act + await manager.ReadAsync(readBuffer, 400, 200, CancellationToken.None); + + // Assert - ExpectedException + } + finally + { + manager.Dispose(); + } + } + + #endregion + + #region ReadAsync Tests - Waiting + + [TestMethod] + public async Task ReadAsync_WaitsForPartAvailability() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + try + { + // Start reading before part is available + var readTask = Task.Run(async () => + { + byte[] readBuffer = new byte[512]; + return await manager.ReadAsync(readBuffer, 0, 512, CancellationToken.None); + }); + + // Give read task time to start waiting + await Task.Delay(100); + Assert.IsFalse(readTask.IsCompleted); + + // Add the part asynchronously + byte[] testBuffer = ArrayPool.Shared.Rent(512); + var partBuffer = new StreamPartBuffer(1, testBuffer, 512); + await manager.WaitForBufferSpaceAsync(CancellationToken.None); + manager.AddBuffer(partBuffer); + + // Assert - Read should 
complete + int bytesRead = await readTask; + Assert.AreEqual(512, bytesRead); + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + public async Task ReadAsync_WhenDownloadComplete_ReturnsZero() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + try + { + // Mark download as complete + manager.MarkDownloadComplete(null); + + // Act + byte[] readBuffer = new byte[512]; + int bytesRead = await manager.ReadAsync(readBuffer, 0, 512, CancellationToken.None); + + // Assert + Assert.AreEqual(0, bytesRead); + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public async Task ReadAsync_WhenDownloadFailed_ThrowsException() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + try + { + // Mark download as failed + var testException = new Exception("Download failed"); + manager.MarkDownloadComplete(testException); + + // Act + byte[] readBuffer = new byte[512]; + await manager.ReadAsync(readBuffer, 0, 512, CancellationToken.None); + + // Assert - ExpectedException + } + finally + { + manager.Dispose(); + } + } + + #endregion + + #region ReadAsync Tests - Cross-Part Boundary Reading + + [TestMethod] + public async Task ReadAsync_ReadingAcrossPartBoundary_FillsBuffer() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + try + { + // Add Part 1 (100 bytes) + byte[] testData1 = MultipartDownloadTestHelpers.GenerateTestData(100, 0); + byte[] testBuffer1 = ArrayPool.Shared.Rent(100); + Buffer.BlockCopy(testData1, 0, testBuffer1, 0, 100); + var partBuffer1 = new StreamPartBuffer(1, testBuffer1, 100); + await manager.WaitForBufferSpaceAsync(CancellationToken.None); + manager.AddBuffer(partBuffer1); + + // Add Part 2 (100 bytes) + byte[] testData2 = MultipartDownloadTestHelpers.GenerateTestData(100, 100); + byte[] testBuffer2 = ArrayPool.Shared.Rent(100); + Buffer.BlockCopy(testData2, 0, testBuffer2, 0, 100); + var partBuffer2 = new StreamPartBuffer(2, testBuffer2, 100); + await manager.WaitForBufferSpaceAsync(CancellationToken.None); + manager.AddBuffer(partBuffer2); + + // Act - Request 150 bytes (spans both parts) + byte[] readBuffer = new byte[150]; + int bytesRead = await manager.ReadAsync(readBuffer, 0, 150, CancellationToken.None); + + // Assert + Assert.AreEqual(150, bytesRead); + + // Verify first 100 bytes from part 1 + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch(testData1, readBuffer, 0, 100)); + + // Verify next 50 bytes from part 2 + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch(testData2, readBuffer, 100, 50)); + + // Should still be on part 2 (not complete yet, 50 bytes remaining) + Assert.AreEqual(2, manager.NextExpectedPartNumber); + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + public async Task ReadAsync_MultiplePartsInSingleRead_AdvancesCorrectly() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + try + { + // Add 3 small parts (50 bytes each) + for (int i = 1; i <= 3; i++) + { + byte[] testData = MultipartDownloadTestHelpers.GeneratePartSpecificData(50, i); + byte[] testBuffer = ArrayPool.Shared.Rent(50); + Buffer.BlockCopy(testData, 0, testBuffer, 0, 50); + var 
partBuffer = new StreamPartBuffer(i, testBuffer, 50); + await manager.WaitForBufferSpaceAsync(CancellationToken.None); + manager.AddBuffer(partBuffer); + } + + // Act - Read 150 bytes (all 3 parts) + byte[] readBuffer = new byte[150]; + int bytesRead = await manager.ReadAsync(readBuffer, 0, 150, CancellationToken.None); + + // Assert + Assert.AreEqual(150, bytesRead); + Assert.AreEqual(4, manager.NextExpectedPartNumber); // Advanced to part 4 + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + public async Task ReadAsync_PartCompletes_AdvancesToNextPart() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + try + { + // Add part 1 + byte[] testBuffer1 = ArrayPool.Shared.Rent(100); + var partBuffer1 = new StreamPartBuffer(1, testBuffer1, 100); + await manager.WaitForBufferSpaceAsync(CancellationToken.None); + manager.AddBuffer(partBuffer1); + + // Read part 1 completely + byte[] readBuffer = new byte[100]; + await manager.ReadAsync(readBuffer, 0, 100, CancellationToken.None); + + // Assert - Should advance to part 2 + Assert.AreEqual(2, manager.NextExpectedPartNumber); + + // Add part 2 + byte[] testBuffer2 = ArrayPool.Shared.Rent(100); + var partBuffer2 = new StreamPartBuffer(2, testBuffer2, 100); + await manager.WaitForBufferSpaceAsync(CancellationToken.None); + manager.AddBuffer(partBuffer2); + + // Read part 2 + int bytesRead = await manager.ReadAsync(readBuffer, 0, 100, CancellationToken.None); + + // Assert + Assert.AreEqual(100, bytesRead); + Assert.AreEqual(3, manager.NextExpectedPartNumber); + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + public async Task ReadAsync_EmptyPart_ContinuesToNextPart() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + try + { + // Add empty part 1 + byte[] testBuffer1 = ArrayPool.Shared.Rent(100); + var partBuffer1 = new StreamPartBuffer(1, testBuffer1, 0); // 0 bytes + await manager.WaitForBufferSpaceAsync(CancellationToken.None); + manager.AddBuffer(partBuffer1); + + // Add part 2 with data + byte[] testData2 = MultipartDownloadTestHelpers.GenerateTestData(100, 0); + byte[] testBuffer2 = ArrayPool.Shared.Rent(100); + Buffer.BlockCopy(testData2, 0, testBuffer2, 0, 100); + var partBuffer2 = new StreamPartBuffer(2, testBuffer2, 100); + await manager.WaitForBufferSpaceAsync(CancellationToken.None); + manager.AddBuffer(partBuffer2); + + // Act - Try to read 100 bytes starting from part 1 + byte[] readBuffer = new byte[100]; + int bytesRead = await manager.ReadAsync(readBuffer, 0, 100, CancellationToken.None); + + // Assert - Should skip empty part 1 and read from part 2 + Assert.AreEqual(100, bytesRead); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch(testData2, readBuffer, 0, 100)); + Assert.AreEqual(3, manager.NextExpectedPartNumber); // Advanced past both parts + } + finally + { + manager.Dispose(); + } + } + + #endregion + + #region ReleaseBufferSpace Tests + + [TestMethod] + public void ReleaseBufferSpace_IncreasesAvailableSlots() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(maxInMemoryParts: 1); + var manager = new PartBufferManager(config); + + try + { + // Take the slot + var task1 = manager.WaitForBufferSpaceAsync(CancellationToken.None); + Assert.IsTrue(task1.IsCompleted); + + // Try to take another (should block) + var task2 = 
manager.WaitForBufferSpaceAsync(CancellationToken.None); + Assert.IsFalse(task2.IsCompleted); // Would block + + // Act - Release space + manager.ReleaseBufferSpace(); + + // Wait briefly for the release to take effect + Task.Delay(50).Wait(); + + // Assert - Second wait should now complete + Assert.IsTrue(task2.IsCompleted || task2.Wait(100)); + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + [ExpectedException(typeof(ObjectDisposedException))] + public void ReleaseBufferSpace_AfterDispose_ThrowsObjectDisposedException() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + manager.Dispose(); + + // Act + manager.ReleaseBufferSpace(); + + // Assert - ExpectedException + } + + #endregion + + #region MarkDownloadComplete Tests + + [TestMethod] + public async Task MarkDownloadComplete_WithNullException_SignalsSuccess() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + try + { + // Act + manager.MarkDownloadComplete(null); + + // Assert - Reading should return 0 (EOF) + byte[] readBuffer = new byte[512]; + int bytesRead = await manager.ReadAsync(readBuffer, 0, 512, CancellationToken.None); + Assert.AreEqual(0, bytesRead); + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + public async Task MarkDownloadComplete_WithException_StoresException() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + var testException = new Exception("Test exception"); + + try + { + // Act + manager.MarkDownloadComplete(testException); + + // Assert - Reading should throw + byte[] readBuffer = new byte[512]; + var ex = await Assert.ThrowsExceptionAsync(async () => + { + await manager.ReadAsync(readBuffer, 0, 512, CancellationToken.None); + }); + + Assert.IsNotNull(ex.InnerException); + Assert.AreEqual(testException, ex.InnerException); + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + public async Task MarkDownloadComplete_SignalsWaitingReads() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + try + { + // Start reading before download is complete + var readTask = Task.Run(async () => + { + byte[] readBuffer = new byte[512]; + return await manager.ReadAsync(readBuffer, 0, 512, CancellationToken.None); + }); + + // Give read task time to start waiting + await Task.Delay(100); + Assert.IsFalse(readTask.IsCompleted); + + // Mark download complete + manager.MarkDownloadComplete(null); + + // Assert - Read should complete with 0 bytes + int bytesRead = await readTask; + Assert.AreEqual(0, bytesRead); + } + finally + { + manager.Dispose(); + } + } + + #endregion + + #region AddBufferAsync(IPartDataSource) Tests + + [TestMethod] + public async Task AddBufferAsync_IPartDataSource_WithStreamingDataSource_AddsSuccessfully() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + try + { + // Create a StreamingDataSource + var testData = MultipartDownloadTestHelpers.GenerateTestData(512, 0); + var response = new GetObjectResponse + { + ContentLength = 512, + ResponseStream = new MemoryStream(testData) + }; + var streamingSource = new StreamingDataSource(1, response); + + // Act + await 
manager.WaitForBufferSpaceAsync(CancellationToken.None); + manager.AddBuffer(streamingSource); + + // Assert - Should be able to read from part 1 + byte[] readBuffer = new byte[512]; + int bytesRead = await manager.ReadAsync(readBuffer, 0, 512, CancellationToken.None); + Assert.AreEqual(512, bytesRead); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch(testData, readBuffer, 0, 512)); + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + public async Task AddBufferAsync_IPartDataSource_WithBufferedDataSource_AddsSuccessfully() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + try + { + // Create a BufferedDataSource + byte[] testData = MultipartDownloadTestHelpers.GenerateTestData(512, 0); + byte[] testBuffer = ArrayPool<byte>.Shared.Rent(512); + Buffer.BlockCopy(testData, 0, testBuffer, 0, 512); + var partBuffer = new StreamPartBuffer(1, testBuffer, 512); + var bufferedSource = new BufferedDataSource(partBuffer); + + // Act + await manager.WaitForBufferSpaceAsync(CancellationToken.None); + manager.AddBuffer(bufferedSource); + + // Assert - Should be able to read from part 1 + byte[] readBuffer = new byte[512]; + int bytesRead = await manager.ReadAsync(readBuffer, 0, 512, CancellationToken.None); + Assert.AreEqual(512, bytesRead); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch(testData, readBuffer, 0, 512)); + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + [ExpectedException(typeof(ArgumentNullException))] + public void AddBufferAsync_IPartDataSource_WithNull_ThrowsArgumentNullException() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + try + { + // Act + manager.AddBuffer((IPartDataSource)null); + + // Assert - ExpectedException + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + public async Task AddBufferAsync_IPartDataSource_SignalsPartAvailable() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + try + { + // Start reading before part is available + var readTask = Task.Run(async () => + { + byte[] readBuffer = new byte[512]; + return await manager.ReadAsync(readBuffer, 0, 512, CancellationToken.None); + }); + + // Give read task time to start waiting + await Task.Delay(50); + + // Create and add streaming data source + var testData = MultipartDownloadTestHelpers.GenerateTestData(512, 0); + var response = new GetObjectResponse + { + ContentLength = 512, + ResponseStream = new MemoryStream(testData) + }; + var streamingSource = new StreamingDataSource(1, response); + + // Act + await manager.WaitForBufferSpaceAsync(CancellationToken.None); + manager.AddBuffer(streamingSource); + + // Assert - Read should complete + int bytesRead = await readTask; + Assert.AreEqual(512, bytesRead); + } + finally + { + manager.Dispose(); + } + } + + #endregion + + #region ReadAsync Tests - StreamingDataSource Integration + + [TestMethod] + public async Task ReadAsync_FromStreamingDataSource_ReadsCorrectly() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + try + { + // Create streaming data source + var testData = MultipartDownloadTestHelpers.GenerateTestData(1000, 0); + var response = new GetObjectResponse + { + ContentLength = 1000, +
ResponseStream = new MemoryStream(testData) + }; + var streamingSource = new StreamingDataSource(1, response); + await manager.WaitForBufferSpaceAsync(CancellationToken.None); + manager.AddBuffer(streamingSource); + + // Act - Read in multiple chunks + byte[] readBuffer = new byte[400]; + int bytesRead1 = await manager.ReadAsync(readBuffer, 0, 400, CancellationToken.None); + + int bytesRead2 = await manager.ReadAsync(readBuffer, 0, 400, CancellationToken.None); + + int bytesRead3 = await manager.ReadAsync(readBuffer, 0, 200, CancellationToken.None); + + // Assert + Assert.AreEqual(400, bytesRead1); + Assert.AreEqual(400, bytesRead2); + Assert.AreEqual(200, bytesRead3); + Assert.AreEqual(2, manager.NextExpectedPartNumber); + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + public async Task ReadAsync_FromMixedSources_ReadsSequentially() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + try + { + // Add streaming source for part 1 + var testData1 = MultipartDownloadTestHelpers.GenerateTestData(500, 0); + var response1 = new GetObjectResponse + { + ContentLength = 500, + ResponseStream = new MemoryStream(testData1) + }; + var streamingSource = new StreamingDataSource(1, response1); + await manager.WaitForBufferSpaceAsync(CancellationToken.None); + manager.AddBuffer((IPartDataSource)streamingSource); + + // Add buffered source for part 2 + var testData2 = MultipartDownloadTestHelpers.GenerateTestData(500, 500); + byte[] testBuffer2 = ArrayPool<byte>.Shared.Rent(500); + Buffer.BlockCopy(testData2, 0, testBuffer2, 0, 500); + var partBuffer2 = new StreamPartBuffer(2, testBuffer2, 500); + await manager.WaitForBufferSpaceAsync(CancellationToken.None); + manager.AddBuffer(partBuffer2); + + // Act - Read across both parts + byte[] readBuffer = new byte[750]; + int bytesRead = await manager.ReadAsync(readBuffer, 0, 750, CancellationToken.None); + + // Assert + Assert.AreEqual(750, bytesRead); + + // Verify first 500 bytes from streaming source + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch(testData1, readBuffer, 0, 500)); + + // Verify next 250 bytes from buffered source + byte[] expectedData2 = new byte[250]; + Array.Copy(testData2, 0, expectedData2, 0, 250); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch(expectedData2, readBuffer, 500, 250)); + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + public async Task ReadAsync_StreamingDataSource_DisposesAfterCompletion() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + try + { + // Create streaming data source + var testData = MultipartDownloadTestHelpers.GenerateTestData(512, 0); + var response = new GetObjectResponse + { + ContentLength = 512, + ResponseStream = new MemoryStream(testData) + }; + var streamingSource = new StreamingDataSource(1, response); + await manager.WaitForBufferSpaceAsync(CancellationToken.None); + manager.AddBuffer(streamingSource); + + // Act - Read all data + byte[] readBuffer = new byte[512]; + await manager.ReadAsync(readBuffer, 0, 512, CancellationToken.None); + + // Assert - StreamingDataSource should be disposed after reading + // This is verified internally by PartBufferManager + Assert.AreEqual(2, manager.NextExpectedPartNumber); + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + public async Task ReadAsync_MultipleStreamingSources_ReadsSequentially() + { +
// Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + try + { + // Add 3 streaming sources + for (int i = 1; i <= 3; i++) + { + var testData = MultipartDownloadTestHelpers.GeneratePartSpecificData(300, i); + var response = new GetObjectResponse + { + ContentLength = 300, + ResponseStream = new MemoryStream(testData) + }; + var streamingSource = new StreamingDataSource(i, response); + await manager.WaitForBufferSpaceAsync(CancellationToken.None); + manager.AddBuffer(streamingSource); + } + + // Act - Read across all parts + byte[] readBuffer = new byte[900]; + int bytesRead = await manager.ReadAsync(readBuffer, 0, 900, CancellationToken.None); + + // Assert + Assert.AreEqual(900, bytesRead); + Assert.AreEqual(4, manager.NextExpectedPartNumber); + } + finally + { + manager.Dispose(); + } + } + + #endregion + + #region Disposal Tests + + [TestMethod] + public void Dispose_DisposesAllDataSources() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + byte[] testBuffer = ArrayPool<byte>.Shared.Rent(512); + var partBuffer = new StreamPartBuffer(1, testBuffer, 512); + manager.AddBuffer(partBuffer); + + // Act + manager.Dispose(); + + // Assert - The underlying part buffer should be disposed + Assert.IsNull(partBuffer.ArrayPoolBuffer); + } + + [TestMethod] + public void Dispose_ClearsCollection() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + byte[] testBuffer = ArrayPool<byte>.Shared.Rent(512); + var partBuffer = new StreamPartBuffer(1, testBuffer, 512); + manager.AddBuffer(partBuffer); + + // Act + manager.Dispose(); + + // Assert - Should not throw (collection cleared) + // Further operations should throw ObjectDisposedException + } + + [TestMethod] + public void Dispose_MultipleCalls_IsIdempotent() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + // Act - Dispose multiple times + manager.Dispose(); + manager.Dispose(); + manager.Dispose(); + + // Assert - Should not throw + } + + [TestMethod] + [ExpectedException(typeof(ObjectDisposedException))] + public async Task Operations_AfterDispose_ThrowObjectDisposedException() + { + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + manager.Dispose(); + + // Act + await manager.WaitForBufferSpaceAsync(CancellationToken.None); + + // Assert - ExpectedException + } + + #endregion + + #region Thread Safety Tests - Memory Visibility + + [TestMethod] + public async Task NextExpectedPartNumber_ConcurrentReads_SeeConsistentValue() + { + // This test verifies that the volatile keyword on _nextExpectedPartNumber + // prevents memory visibility issues when multiple producer threads + // read the value while the consumer thread updates it. + // + // Without volatile, producer threads may see stale cached values, + // causing incorrect stream-vs-buffer decisions. + // + // The test simulates BufferedPartDataHandler.ProcessPartAsync's pattern: + // Multiple download threads checking "partNumber == NextExpectedPartNumber" + // while the consumer thread increments NextExpectedPartNumber.
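+ + // A minimal sketch of the assumed pattern under test (illustrative only, not the SDK's verbatim source): + // private volatile int _nextExpectedPartNumber = 1; + // public int NextExpectedPartNumber => _nextExpectedPartNumber; // producers read + // consumer, after fully draining a part: _nextExpectedPartNumber++; + // volatile forces reads and writes through to main memory, so producers observe the consumer's increments promptly.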
+ + // Arrange + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(); + var manager = new PartBufferManager(config); + + const int NumReaderThreads = 8; + const int NumIncrements = 100; + + var readErrors = new System.Collections.Concurrent.ConcurrentBag<string>(); + var startSignal = new ManualResetEventSlim(false); + var stopSignal = new ManualResetEventSlim(false); + + try + { + // Start multiple reader threads that continuously read NextExpectedPartNumber + var readerTasks = new Task[NumReaderThreads]; + for (int i = 0; i < NumReaderThreads; i++) + { + int threadId = i; + readerTasks[i] = Task.Run(() => + { + // Wait for start signal + startSignal.Wait(); + + int lastSeenValue = 0; + + // Aggressively read the value until stopped + while (!stopSignal.IsSet) + { + int currentValue = manager.NextExpectedPartNumber; + + // Verify we never see a value less than what we saw before + // (This would indicate stale cached reads) + if (currentValue < lastSeenValue) + { + readErrors.Add($"Thread {threadId} saw value go backwards: {lastSeenValue} -> {currentValue}"); + } + + lastSeenValue = currentValue; + + // Spin to create cache pressure + Thread.SpinWait(10); + } + }); + } + + // Start all reader threads simultaneously + startSignal.Set(); + + // Give threads time to start reading + await Task.Delay(10); + + // Simulate consumer thread incrementing NextExpectedPartNumber + // by adding and reading parts sequentially + for (int partNum = 1; partNum <= NumIncrements; partNum++) + { + // Wait for buffer space before adding part + await manager.WaitForBufferSpaceAsync(CancellationToken.None); + + // Add part + byte[] testBuffer = ArrayPool<byte>.Shared.Rent(100); + var partBuffer = new StreamPartBuffer(partNum, testBuffer, 100); + manager.AddBuffer(partBuffer); + + // Read part completely to trigger increment + byte[] readBuffer = new byte[100]; + await manager.ReadAsync(readBuffer, 0, 100, CancellationToken.None); + + // NextExpectedPartNumber should now be partNum + 1 + + // Small spin to create timing variance + Thread.SpinWait(5); + } + + // Stop reader threads + stopSignal.Set(); + + // Wait for all readers to finish + await Task.WhenAll(readerTasks); + + // Assert - No reader should have seen inconsistent values + if (readErrors.Count > 0) + { + var errorMessage = $"Memory visibility issues detected:\n{string.Join("\n", readErrors.Take(10))}"; + if (readErrors.Count > 10) + { + errorMessage += $"\n... and {readErrors.Count - 10} more errors"; + } + Assert.Fail(errorMessage); + } + + // Verify final value is correct + Assert.AreEqual(NumIncrements + 1, manager.NextExpectedPartNumber); + } + finally + { + stopSignal.Set(); // Ensure threads stop even on failure + manager.Dispose(); + startSignal.Dispose(); + stopSignal.Dispose(); + } + } + + #endregion + + #region Semaphore MaxCount Tests + + [TestMethod] + public async Task WaitForBufferSpaceAsync_WithMaxCount_DoesNotExceedConfiguredLimit() + { + // This test verifies the fix for the double release bug. + // Before the fix: SemaphoreSlim without maxCount allowed unlimited Release() calls, + // which could corrupt the semaphore state and allow more concurrent operations than configured. + // After the fix: maxCount parameter prevents exceeding MaxInMemoryParts limit.
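+ + // Sketch of the assumed change (field name illustrative): + // before: _bufferSpaceSemaphore = new SemaphoreSlim(maxInMemoryParts); // excess Release() silently widens the limit + // after: _bufferSpaceSemaphore = new SemaphoreSlim(maxInMemoryParts, maxInMemoryParts); // excess Release() throws SemaphoreFullException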
+ + // Arrange + const int maxInMemoryParts = 3; + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(maxInMemoryParts: maxInMemoryParts); + var manager = new PartBufferManager(config); + + try + { + // Acquire all available slots + for (int i = 0; i < maxInMemoryParts; i++) + { + await manager.WaitForBufferSpaceAsync(CancellationToken.None); + } + + // Release all acquired slots + for (int i = 0; i < maxInMemoryParts; i++) + { + manager.ReleaseBufferSpace(); + } + + // Attempt to release beyond maxCount (should throw) + Assert.ThrowsException<SemaphoreFullException>(() => + { + manager.ReleaseBufferSpace(); + }, "Releasing beyond maxCount should throw SemaphoreFullException"); + + // Attempt one more release to confirm protection is consistent + Assert.ThrowsException<SemaphoreFullException>(() => + { + manager.ReleaseBufferSpace(); + }, "Second excessive release should also throw SemaphoreFullException"); + + // Act - Try to acquire slots again + var acquiredSlots = 0; + for (int i = 0; i < maxInMemoryParts + 2; i++) + { + var waitTask = manager.WaitForBufferSpaceAsync(CancellationToken.None); + if (await Task.WhenAny(waitTask, Task.Delay(100)) == waitTask) + { + acquiredSlots++; + } + else + { + break; // Task didn't complete, no more slots available + } + } + + // Assert - Should only be able to acquire maxInMemoryParts slots, not more + // With maxCount fix: Can only acquire 3 slots (respects limit) + // Without maxCount fix: Could acquire 5 slots (2 extra from double releases) + Assert.AreEqual(maxInMemoryParts, acquiredSlots, + $"Semaphore should respect maxCount={maxInMemoryParts} limit despite excessive releases"); + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + public async Task ReleaseBufferSpace_ExcessiveReleases_MaintainsSemaphoreIntegrity() + { + // This test verifies that excessive Release() calls don't corrupt semaphore state. + // The maxCount parameter ensures CurrentCount never exceeds MaxInMemoryParts.
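+ + // For reference, this relies on documented SemaphoreSlim behavior: + // var s = new SemaphoreSlim(initialCount: 2, maxCount: 2); + // s.Release(); // throws SemaphoreFullException because CurrentCount is already at maxCount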
+ + // Arrange + const int maxInMemoryParts = 5; + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(maxInMemoryParts: maxInMemoryParts); + var manager = new PartBufferManager(config); + + try + { + // Acquire half the slots + for (int i = 0; i < maxInMemoryParts / 2; i++) + { + await manager.WaitForBufferSpaceAsync(CancellationToken.None); + } + + // Release the acquired slots + for (int i = 0; i < maxInMemoryParts / 2; i++) + { + manager.ReleaseBufferSpace(); + } + + // Now semaphore should be at full capacity (maxInMemoryParts) + // Attempt to release beyond maxCount - each should throw + var excessiveReleaseCount = 0; + for (int i = 0; i < 5; i++) + { + try + { + manager.ReleaseBufferSpace(); + Assert.Fail($"Release #{i + 1} beyond maxCount should have thrown SemaphoreFullException"); + } + catch (SemaphoreFullException) + { + excessiveReleaseCount++; + } + } + + // Assert - All excessive releases should have thrown + Assert.AreEqual(5, excessiveReleaseCount, "All excessive releases should throw SemaphoreFullException"); + + // Act - Count how many slots are now available + var availableSlots = 0; + for (int i = 0; i < maxInMemoryParts * 2; i++) + { + var waitTask = manager.WaitForBufferSpaceAsync(CancellationToken.None); + if (waitTask.IsCompleted) + { + availableSlots++; + await waitTask; + } + else + { + break; + } + } + + // Assert - Should never exceed maxInMemoryParts + Assert.IsTrue(availableSlots <= maxInMemoryParts, + $"Available slots ({availableSlots}) should not exceed maxInMemoryParts ({maxInMemoryParts})"); + } + finally + { + manager.Dispose(); + } + } + + [TestMethod] + public async Task BufferCapacity_ConcurrentOperations_RespectsMaxCountLimit() + { + // This test simulates the real-world scenario where multiple parts are being + // processed concurrently, verifying that the maxCount parameter prevents + // exceeding the configured buffer capacity limit. 
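+ + // The producer/consumer flow being simulated (simplified shape, using the same APIs exercised below): + // producer: await manager.WaitForBufferSpaceAsync(ct); then manager.AddBuffer(part); + // consumer: await manager.ReadAsync(...); then manager.ReleaseBufferSpace(); + // so at most MaxInMemoryParts parts are held in memory at any instant.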
+ + // Arrange + const int maxInMemoryParts = 4; + const int totalParts = 10; + var config = MultipartDownloadTestHelpers.CreateBufferedDownloadConfiguration(maxInMemoryParts: maxInMemoryParts); + var manager = new PartBufferManager(config); + + try + { + var activeParts = 0; + var maxActiveParts = 0; + var lockObj = new object(); + + // Simulate concurrent part processing + var tasks = new List<Task>(); + for (int partNum = 1; partNum <= totalParts; partNum++) + { + int capturedPartNum = partNum; + tasks.Add(Task.Run(async () => + { + // Wait for buffer space (enforces maxInMemoryParts limit) + await manager.WaitForBufferSpaceAsync(CancellationToken.None); + + lock (lockObj) + { + activeParts++; + if (activeParts > maxActiveParts) + { + maxActiveParts = activeParts; + } + } + + // Simulate buffering the part + byte[] testBuffer = ArrayPool<byte>.Shared.Rent(100); + var partBuffer = new StreamPartBuffer(capturedPartNum, testBuffer, 100); + manager.AddBuffer(partBuffer); + + // Simulate some processing time + await Task.Delay(10); + + // Consumer reads the part (happens asynchronously in real scenario) + // For this test, we'll manually release after a delay + await Task.Delay(20); + + lock (lockObj) + { + activeParts--; + } + + // Release is normally done by consumer after reading part + manager.ReleaseBufferSpace(); + })); + } + + // Wait for all parts to be processed + await Task.WhenAll(tasks); + + // Assert - Should never have exceeded maxInMemoryParts + Assert.IsTrue(maxActiveParts <= maxInMemoryParts, + $"Maximum concurrent buffered parts ({maxActiveParts}) exceeded configured limit ({maxInMemoryParts})"); + } + finally + { + manager.Dispose(); + } + } + + #endregion + } +} diff --git a/sdk/test/Services/S3/UnitTests/Custom/ResponseMapperTests.cs b/sdk/test/Services/S3/UnitTests/Custom/ResponseMapperTests.cs index ab1c03043c7a..76efef9abfad 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/ResponseMapperTests.cs +++ b/sdk/test/Services/S3/UnitTests/Custom/ResponseMapperTests.cs @@ -131,6 +131,29 @@ public static void ClassCleanup() _propertyAliasesJson?.Dispose(); } + [TestMethod] + [TestCategory("S3")] + public void MapPutObjectResponse_AllMappedProperties_WorkCorrectly() + { + ValidateMappingTransferUtilityAndSdkRequests( + new[] { "Conversion", "PutObjectResponse", "UploadResponse" }, + (sourceResponse) => + { + return ResponseMapper.MapPutObjectResponse(sourceResponse); + }, + usesHeadersCollection: false, + usesResponseHeadersOverrides: false, + (sourceResponse) => + { + sourceResponse.HttpStatusCode = HttpStatusCode.OK; + sourceResponse.ContentLength = 1024; + }, + (sourceResponse, targetResponse) => + { + + }); + } + [TestMethod] [TestCategory("S3")] public void MapUploadRequest_PutObjectRequest_AllMappedProperties_WorkCorrectly() @@ -142,7 +165,12 @@ public void MapUploadRequest_PutObjectRequest_AllMappedProperties_WorkCorrectly( var simpleUploadCommand = new SimpleUploadCommand(null, null, sourceRequest); return simpleUploadCommand.ConstructRequest(); }, - usesHeadersCollection: false); + usesHeadersCollection: true, + usesResponseHeadersOverrides: false, + (sourceRequest) => + { + sourceRequest.InputStream = new MemoryStream(1024); + }); + [TestMethod] @@ -387,6 +415,33 @@ public void MapAbortMultipartUploadsCommand_MinimalRequest_DoesNotSetOptionalFie Assert.IsNull(listResult.RequestPayer, "RequestPayer should be null with minimal request"); } + [TestMethod] + [TestCategory("S3")] + public void MapPutObjectResponse_NullValues_HandledCorrectly() + { + // Test null handling scenarios +
var testCases = new[] + { + // Test null Expiration + new PutObjectResponse { Expiration = null }, + + // Test null enum conversions + new PutObjectResponse { ChecksumType = null, RequestCharged = null, ServerSideEncryptionMethod = null } + }; + + foreach (var testCase in testCases) + { + var mapped = ResponseMapper.MapPutObjectResponse(testCase); + Assert.IsNotNull(mapped, "Response should always be mappable"); + + // Test null handling + if (testCase.Expiration == null) + { + Assert.IsNull(mapped.Expiration, "Null Expiration should map to null"); + } + } + } + private void ValidateMappingTransferUtilityAndSdkRequests( string[] mappingPath, Func fetchTargetRequest, @@ -666,6 +721,74 @@ private void ValidateMappingTransferUtilityAndSdkRequests( + new[] { "Definition", "UploadResponse", "PutObjectResponse" }, + "TransferUtilityUploadResponse"); + } + + [TestMethod] + [TestCategory("S3")] + public void MapCompleteMultipartUploadResponse_AllMappedProperties_WorkCorrectly() + { + ValidateMappingTransferUtilityAndSdkRequests( + new[] { "Conversion", "CompleteMultipartResponse", "UploadResponse" }, + (sourceResponse) => + { + return ResponseMapper.MapCompleteMultipartUploadResponse(sourceResponse); + }, + usesHeadersCollection: false, + usesResponseHeadersOverrides: false, + (sourceResponse) => + { + sourceResponse.HttpStatusCode = HttpStatusCode.OK; + sourceResponse.ContentLength = 1024; + }, + (sourceResponse, targetResponse) => + { + + }); + } + + [TestMethod] + [TestCategory("S3")] + public void MapCompleteMultipartUploadResponse_NullValues_HandledCorrectly() + { + // Test null handling scenarios + var testCases = new[] + { + // Test null Expiration + new CompleteMultipartUploadResponse { Expiration = null }, + + // Test null enum conversions + new CompleteMultipartUploadResponse { ChecksumType = null, RequestCharged = null, ServerSideEncryptionMethod = null } + }; + + foreach (var testCase in testCases) + { + var mapped = ResponseMapper.MapCompleteMultipartUploadResponse(testCase); + Assert.IsNotNull(mapped, "Response should always be mappable"); + + // Test null handling + if (testCase.Expiration == null) + { + Assert.IsNull(mapped.Expiration, "Null Expiration should map to null"); + } + } + } + + [TestMethod] + [TestCategory("S3")] + public void ValidateCompleteMultipartUploadResponseConversionCompleteness() + { + ValidateResponseDefinitionCompleteness( + new[] { "Conversion", "CompleteMultipartResponse", "UploadResponse" }, + "TransferUtilityUploadResponse"); + } + [TestMethod] [TestCategory("S3")] public void ValidatePutObjectRequestDefinitionCompleteness() @@ -683,6 +806,56 @@ public void ValidatePutObjectRequestDefinitionCompleteness() }); } + [TestMethod] + [TestCategory("S3")] + public void MapGetObjectResponse_AllMappedProperties_WorkCorrectly() + { + ValidateMappingTransferUtilityAndSdkRequests( + new[] { "Conversion", "GetObjectResponse", "DownloadResponse" }, + (sourceResponse) => + { + return ResponseMapper.MapGetObjectResponse(sourceResponse); + }, + usesHeadersCollection: true, + usesResponseHeadersOverrides: false, + (sourceResponse) => + { + sourceResponse.HttpStatusCode = HttpStatusCode.OK; + sourceResponse.Headers.ContentLength = 1024; + }, + (sourceResponse, targetResponse) => + { + + }); + } + + [TestMethod] + [TestCategory("S3")] + public void MapGetObjectResponse_NullValues_HandledCorrectly() + { + // Test null handling scenarios + var testCases = new[] + { + // Test null Expiration + new GetObjectResponse { Expiration = null }, + + // Test null enum conversions + new 
GetObjectResponse { ChecksumType = null, RequestCharged = null, ServerSideEncryptionMethod = null } + }; + + foreach (var testCase in testCases) + { + var mapped = ResponseMapper.MapGetObjectResponse(testCase); + Assert.IsNotNull(mapped, "Response should always be mappable"); + + // Test null handling + if (testCase.Expiration == null) + { + Assert.IsNull(mapped.Expiration, "Null Expiration should map to null"); + } + } + } + [TestMethod] [TestCategory("S3")] public void ValidateGetObjectRequestDefinitionCompleteness() @@ -734,6 +907,205 @@ public void ValidateTransferUtilityUploadRequestDefinitionCompleteness() }); } + [TestMethod] + [TestCategory("S3")] + public void MapGetObjectResponseToOpenStream_AllMappedProperties_WorkCorrectly() + { + ValidateMappingTransferUtilityAndSdkRequests( + new[] { "Conversion", "GetObjectResponse", "DownloadResponse" }, + (sourceResponse) => + { + return ResponseMapper.MapGetObjectResponseToOpenStream(sourceResponse); + }, + usesHeadersCollection: true, + usesResponseHeadersOverrides: false, + (sourceResponse) => + { + sourceResponse.HttpStatusCode = HttpStatusCode.OK; + sourceResponse.Headers.ContentLength = 1024; + sourceResponse.ResponseStream = new MemoryStream(new byte[1024]); + }, + (sourceResponse, targetResponse) => + { + Assert.AreSame(sourceResponse.ResponseStream, targetResponse.ResponseStream, "ResponseStream should be the same instance"); + }); + } + + [TestMethod] + [TestCategory("S3")] + public void MapGetObjectResponseToOpenStream_NullValues_HandledCorrectly() + { + // Test null handling scenarios + var testCases = new[] + { + // Test null Expiration + new GetObjectResponse { Expiration = null }, + + // Test null enum conversions + new GetObjectResponse { ChecksumType = null, RequestCharged = null, ServerSideEncryptionMethod = null }, + + // Test null ResponseStream + new GetObjectResponse { ResponseStream = null } + }; + + foreach (var testCase in testCases) + { + var mapped = ResponseMapper.MapGetObjectResponseToOpenStream(testCase); + Assert.IsNotNull(mapped, "Response should always be mappable"); + + // Test null handling + if (testCase.Expiration == null) + { + Assert.IsNull(mapped.Expiration, "Null Expiration should map to null"); + } + + if (testCase.ResponseStream == null) + { + Assert.IsNull(mapped.ResponseStream, "Null ResponseStream should map to null"); + } + } + } + + [TestMethod] + [TestCategory("S3")] + public void MapGetObjectResponseToOpenStream_ResponseStream_HandledCorrectly() + { + // Test with actual stream + var testStream = new MemoryStream(new byte[] { 1, 2, 3, 4, 5 }); + var sourceResponse = new GetObjectResponse + { + ResponseStream = testStream, + ETag = "test-etag", + Headers = { ContentLength = 5 } + }; + + var mappedResponse = ResponseMapper.MapGetObjectResponseToOpenStream(sourceResponse); + + Assert.IsNotNull(mappedResponse, "Mapped response should not be null"); + Assert.AreSame(testStream, mappedResponse.ResponseStream, "ResponseStream should be the same instance"); + Assert.AreEqual("test-etag", mappedResponse.ETag, "Other properties should also be mapped"); + Assert.AreEqual(5, mappedResponse.Headers.ContentLength, "ContentLength should be mapped"); + + // Test with null stream + var sourceWithNullStream = new GetObjectResponse + { + ResponseStream = null, + ETag = "test-etag-2" + }; + + var mappedWithNullStream = ResponseMapper.MapGetObjectResponseToOpenStream(sourceWithNullStream); + + Assert.IsNotNull(mappedWithNullStream, "Mapped response should not be null even with null stream"); + 
Assert.IsNull(mappedWithNullStream.ResponseStream, "ResponseStream should be null when source is null"); + Assert.AreEqual("test-etag-2", mappedWithNullStream.ETag, "Other properties should still be mapped"); + } + + [TestMethod] + [TestCategory("S3")] + public void MapGetObjectResponseToOpenStream_NullSource_ThrowsArgumentNullException() + { + Assert.ThrowsException<ArgumentNullException>(() => + ResponseMapper.MapGetObjectResponseToOpenStream(null), + "Mapping null source should throw ArgumentNullException"); + } + + [TestMethod] + [TestCategory("S3")] + public void TransferUtilityOpenStreamResponse_Dispose_DisposesResponseStream() + { + // Arrange + var memoryStream = new MemoryStream(new byte[] { 1, 2, 3, 4, 5 }); + var response = new TransferUtilityOpenStreamResponse + { + ResponseStream = memoryStream, + ETag = "test-etag" + }; + + // Act + response.Dispose(); + + // Assert - accessing disposed stream should throw ObjectDisposedException + Assert.ThrowsException<ObjectDisposedException>(() => _ = memoryStream.Length, + "Accessing Length of disposed stream should throw ObjectDisposedException"); + Assert.ThrowsException<ObjectDisposedException>(() => _ = memoryStream.Position, + "Accessing Position of disposed stream should throw ObjectDisposedException"); + Assert.ThrowsException<ObjectDisposedException>(() => memoryStream.Read(new byte[1], 0, 1), + "Reading from disposed stream should throw ObjectDisposedException"); + Assert.IsNull(response.ResponseStream, "ResponseStream should be null after disposal"); + } + + [TestMethod] + [TestCategory("S3")] + public void TransferUtilityOpenStreamResponse_Dispose_MultipleCallsSafe() + { + // Arrange + var memoryStream = new MemoryStream(new byte[] { 1, 2, 3, 4, 5 }); + var response = new TransferUtilityOpenStreamResponse + { + ResponseStream = memoryStream + }; + + // Act - call dispose multiple times + response.Dispose(); + response.Dispose(); // Second call should not throw + + // Assert - stream should still be disposed after multiple dispose calls + Assert.ThrowsException<ObjectDisposedException>(() => _ = memoryStream.Length, + "Stream should remain disposed after multiple dispose calls"); + Assert.ThrowsException<ObjectDisposedException>(() => memoryStream.Read(new byte[1], 0, 1), + "Stream should remain disposed after multiple dispose calls"); + Assert.IsNull(response.ResponseStream, "ResponseStream should remain null after multiple dispose calls"); + } + + [TestMethod] + [TestCategory("S3")] + public void TransferUtilityOpenStreamResponse_Dispose_NullStreamSafe() + { + // Arrange + var response = new TransferUtilityOpenStreamResponse + { + ResponseStream = null, + ETag = "test-etag" + }; + + // Act & Assert - should not throw + response.Dispose(); + Assert.IsNull(response.ResponseStream, "ResponseStream should remain null"); + } + + [TestMethod] + [TestCategory("S3")] + public void TransferUtilityOpenStreamResponse_UsingStatement_DisposesCorrectly() + { + // Arrange + var memoryStream = new MemoryStream(new byte[] { 1, 2, 3, 4, 5 }); + MemoryStream capturedStream = null; + + // Act + using (var response = new TransferUtilityOpenStreamResponse()) + { + response.ResponseStream = memoryStream; + response.ETag = "test-etag"; + capturedStream = memoryStream; + } // Dispose should be called here + + // Assert - stream should be disposed after using block + Assert.ThrowsException<ObjectDisposedException>(() => _ = capturedStream.Length, + "Stream should be disposed after using block"); + Assert.ThrowsException<ObjectDisposedException>(() => capturedStream.Read(new byte[1], 0, 1), + "Stream should be disposed after using block"); + } + + [TestMethod] + [TestCategory("S3")] + public void
TransferUtilityOpenStreamResponse_ImplementsIDisposable() + { + // Assert + Assert.IsTrue(typeof(IDisposable).IsAssignableFrom(typeof(TransferUtilityOpenStreamResponse)), + "TransferUtilityOpenStreamResponse should implement IDisposable"); + } + + /// /// Generates appropriate test data for a given property type /// @@ -789,10 +1161,14 @@ private static object GenerateTestValue(Type propertyType, string propertyName) }; } - // Integer types - if (propertyType == typeof(int) || propertyType == typeof(long)) + if (propertyType == typeof(int)) + { + return 1024; + } + + if (propertyType == typeof(long)) { - return 1024; + return 1024L; // Return long literal } if (propertyType == typeof(List)) diff --git a/sdk/test/Services/S3/UnitTests/Custom/StreamPartBufferTests.cs b/sdk/test/Services/S3/UnitTests/Custom/StreamPartBufferTests.cs new file mode 100644 index 000000000000..2ddde6d48238 --- /dev/null +++ b/sdk/test/Services/S3/UnitTests/Custom/StreamPartBufferTests.cs @@ -0,0 +1,396 @@ +using Amazon.S3.Transfer.Internal; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using System; +using System.Buffers; + +namespace AWSSDK.UnitTests +{ + /// + /// Unit tests for StreamPartBuffer class. + /// Tests ArrayPool buffer management and position tracking. + /// + [TestClass] + public class StreamPartBufferTests + { + #region Creation Tests + + [TestMethod] + public void Create_WithValidParameters_CreatesBuffer() + { + // Arrange + int partNumber = 1; + int capacity = 1024; + int actualLength = 512; + + // Act + var partBuffer = StreamPartBuffer.Create(partNumber, capacity); + + try + { + // Simulate writing data + partBuffer.SetLength(actualLength); + + // Assert + Assert.AreEqual(partNumber, partBuffer.PartNumber); + Assert.IsNotNull(partBuffer.ArrayPoolBuffer); + Assert.IsTrue(partBuffer.ArrayPoolBuffer.Length >= capacity); // ArrayPool may return larger + Assert.AreEqual(actualLength, partBuffer.Length); + Assert.AreEqual(0, partBuffer.CurrentPosition); + Assert.AreEqual(actualLength, partBuffer.RemainingBytes); + } + finally + { + partBuffer.Dispose(); + } + } + + [TestMethod] + public void Create_InitializesWithZeroLength() + { + // Arrange + int partNumber = 2; + int capacity = 2048; + + // Act + var partBuffer = StreamPartBuffer.Create(partNumber, capacity); + + try + { + // Assert - Length should be 0 until SetLength is called + Assert.AreEqual(partNumber, partBuffer.PartNumber); + Assert.IsNotNull(partBuffer.ArrayPoolBuffer); + Assert.AreEqual(0, partBuffer.Length); + Assert.AreEqual(0, partBuffer.CurrentPosition); + Assert.AreEqual(0, partBuffer.RemainingBytes); + } + finally + { + partBuffer.Dispose(); + } + } + + #endregion + + #region Property Tests + + [TestMethod] + public void RemainingBytes_ReturnsCorrectValue() + { + // Arrange + var partBuffer = StreamPartBuffer.Create(1, 1024); + partBuffer.SetLength(500); + + try + { + // Act & Assert - At start + Assert.AreEqual(500, partBuffer.RemainingBytes); + + // Act & Assert - After reading some bytes + partBuffer.CurrentPosition = 100; + Assert.AreEqual(400, partBuffer.RemainingBytes); + + // Act & Assert - At end + partBuffer.CurrentPosition = 500; + Assert.AreEqual(0, partBuffer.RemainingBytes); + } + finally + { + partBuffer.Dispose(); + } + } + + [TestMethod] + public void Length_ReturnsCorrectValue() + { + // Arrange + int actualLength = 1000; + var partBuffer = StreamPartBuffer.Create(1, 2048); + partBuffer.SetLength(actualLength); + + try + { + // Act & Assert + Assert.AreEqual(actualLength, partBuffer.Length); + } + finally + 
{ + partBuffer.Dispose(); + } + } + + [TestMethod] + public void CurrentPosition_CanBeUpdated() + { + // Arrange + var partBuffer = StreamPartBuffer.Create(1, 1024); + partBuffer.SetLength(500); + + try + { + // Act + partBuffer.CurrentPosition = 250; + + // Assert + Assert.AreEqual(250, partBuffer.CurrentPosition); + } + finally + { + partBuffer.Dispose(); + } + } + + #endregion + + #region Reading Position Tests + + [TestMethod] + public void CurrentPosition_AfterReading_UpdatesCorrectly() + { + // Arrange + var partBuffer = StreamPartBuffer.Create(1, 1024); + partBuffer.SetLength(500); + + try + { + // Simulate reading 100 bytes + partBuffer.CurrentPosition += 100; + Assert.AreEqual(100, partBuffer.CurrentPosition); + Assert.AreEqual(400, partBuffer.RemainingBytes); + + // Simulate reading another 150 bytes + partBuffer.CurrentPosition += 150; + Assert.AreEqual(250, partBuffer.CurrentPosition); + Assert.AreEqual(250, partBuffer.RemainingBytes); + } + finally + { + partBuffer.Dispose(); + } + } + + [TestMethod] + public void RemainingBytes_WhenFullyRead_ReturnsZero() + { + // Arrange + var partBuffer = StreamPartBuffer.Create(1, 1024); + partBuffer.SetLength(500); + + try + { + // Act - Read all bytes + partBuffer.CurrentPosition = 500; + + // Assert + Assert.AreEqual(0, partBuffer.RemainingBytes); + } + finally + { + partBuffer.Dispose(); + } + } + + #endregion + + #region SetLength Tests + + [TestMethod] + public void SetLength_WithValidLength_SetsCorrectly() + { + // Arrange + var partBuffer = StreamPartBuffer.Create(1, 1024); + + // Act + partBuffer.SetLength(500); + + try + { + // Assert + Assert.AreEqual(500, partBuffer.Length); + } + finally + { + partBuffer.Dispose(); + } + } + + [TestMethod] + [ExpectedException(typeof(InvalidOperationException))] + public void SetLength_CalledTwice_ThrowsException() + { + // Arrange + var partBuffer = StreamPartBuffer.Create(1, 1024); + partBuffer.SetLength(500); + + try + { + // Act - Try to set length again + partBuffer.SetLength(600); + } + finally + { + partBuffer.Dispose(); + } + } + + [TestMethod] + [ExpectedException(typeof(ArgumentOutOfRangeException))] + public void SetLength_WithNegativeLength_ThrowsException() + { + // Arrange + var partBuffer = StreamPartBuffer.Create(1, 1024); + + try + { + // Act + partBuffer.SetLength(-1); + } + finally + { + partBuffer.Dispose(); + } + } + + [TestMethod] + [ExpectedException(typeof(ArgumentOutOfRangeException))] + public void SetLength_ExceedsBufferCapacity_ThrowsException() + { + // Arrange + var partBuffer = StreamPartBuffer.Create(1, 1024); + + try + { + // Act - Try to set length larger than buffer capacity + partBuffer.SetLength(10000); + } + finally + { + partBuffer.Dispose(); + } + } + + #endregion + + #region Disposal Tests + + [TestMethod] + public void Dispose_ReturnsBufferToArrayPool() + { + // Arrange + var partBuffer = StreamPartBuffer.Create(1, 1024); + partBuffer.SetLength(500); + + // Act + partBuffer.Dispose(); + + // Assert - Buffer should be returned (verified by checking it's nulled) + Assert.IsNull(partBuffer.ArrayPoolBuffer); + } + + [TestMethod] + public void Dispose_MultipleCalls_IsIdempotent() + { + // Arrange + var partBuffer = StreamPartBuffer.Create(1, 1024); + partBuffer.SetLength(500); + + // Act - Dispose multiple times + partBuffer.Dispose(); + partBuffer.Dispose(); + partBuffer.Dispose(); + + // Assert - Should not throw + Assert.IsNull(partBuffer.ArrayPoolBuffer); + } + + [TestMethod] + public void Dispose_SetsArrayPoolBufferToNull() + { + // Arrange + var 
partBuffer = StreamPartBuffer.Create(1, 1024); + partBuffer.SetLength(500); + + // Act + partBuffer.Dispose(); + + // Assert + Assert.IsNull(partBuffer.ArrayPoolBuffer); + } + + #endregion + + #region Edge Cases + + [TestMethod] + public void Constructor_WithEmptyBuffer_HandlesCorrectly() + { + // Arrange + byte[] testBuffer = ArrayPool<byte>.Shared.Rent(1024); + var partBuffer = new StreamPartBuffer(1, testBuffer, 0); + + try + { + // Assert + Assert.AreEqual(0, partBuffer.Length); + Assert.AreEqual(0, partBuffer.RemainingBytes); + Assert.AreEqual(0, partBuffer.CurrentPosition); + } + finally + { + partBuffer.Dispose(); + } + } + + [TestMethod] + public void RemainingBytes_WhenPositionBeyondLength_ReturnsZero() + { + // Arrange + byte[] testBuffer = ArrayPool<byte>.Shared.Rent(1024); + var partBuffer = new StreamPartBuffer(1, testBuffer, 500); + + try + { + // Act - Position beyond actual length + partBuffer.CurrentPosition = 600; + + // Assert - RemainingBytes uses Math.Max(0, ...) to prevent negative + Assert.AreEqual(0, partBuffer.RemainingBytes); + } + finally + { + partBuffer.Dispose(); + } + } + + #endregion + + #region ToString Tests + + [TestMethod] + public void ToString_ReturnsExpectedFormat() + { + // Arrange + byte[] testBuffer = ArrayPool<byte>.Shared.Rent(1024); + var partBuffer = new StreamPartBuffer(3, testBuffer, 500); + + try + { + partBuffer.CurrentPosition = 100; + + // Act + string result = partBuffer.ToString(); + + // Assert - Verify format contains key information + Assert.IsTrue(result.Contains("Part=3")); + Assert.IsTrue(result.Contains("500 bytes")); + Assert.IsTrue(result.Contains("pos=100")); + Assert.IsTrue(result.Contains("remaining=400")); + } + finally + { + partBuffer.Dispose(); + } + } + + #endregion + } +} diff --git a/sdk/test/Services/S3/UnitTests/Custom/StreamingDataSourceTests.cs b/sdk/test/Services/S3/UnitTests/Custom/StreamingDataSourceTests.cs new file mode 100644 index 000000000000..1822b7f85d4f --- /dev/null +++ b/sdk/test/Services/S3/UnitTests/Custom/StreamingDataSourceTests.cs @@ -0,0 +1,708 @@ +using Amazon.S3.Model; +using Amazon.S3.Transfer.Internal; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using System; +using System.IO; +using System.Threading; +using System.Threading.Tasks; + +namespace AWSSDK.UnitTests +{ + /// + /// Unit tests for StreamingDataSource class. + /// Tests direct streaming from GetObjectResponse without buffering.
+ /// + [TestClass] + public class StreamingDataSourceTests + { + #region Constructor Tests + + [TestMethod] + public void Constructor_WithValidResponse_CreatesDataSource() + { + // Arrange + var response = CreateMockGetObjectResponse(512); + + // Act + var dataSource = new StreamingDataSource(1, response); + + // Assert + Assert.IsNotNull(dataSource); + Assert.AreEqual(1, dataSource.PartNumber); + Assert.IsFalse(dataSource.IsComplete); + + // Cleanup + dataSource.Dispose(); + } + + [TestMethod] + [ExpectedException(typeof(ArgumentNullException))] + public void Constructor_WithNullResponse_ThrowsArgumentNullException() + { + // Act + var dataSource = new StreamingDataSource(1, null); + + // Assert - ExpectedException + } + + [TestMethod] + public void Constructor_SetsPartNumberCorrectly() + { + // Arrange + var response = CreateMockGetObjectResponse(512); + + // Act + var dataSource = new StreamingDataSource(5, response); + + // Assert + Assert.AreEqual(5, dataSource.PartNumber); + + // Cleanup + dataSource.Dispose(); + } + + #endregion + + #region Property Tests + + [TestMethod] + public void PartNumber_ReturnsConstructorValue() + { + // Arrange + var response = CreateMockGetObjectResponse(512); + var dataSource = new StreamingDataSource(3, response); + + try + { + // Act & Assert + Assert.AreEqual(3, dataSource.PartNumber); + } + finally + { + dataSource.Dispose(); + } + } + + [TestMethod] + public void IsComplete_InitiallyFalse() + { + // Arrange + var response = CreateMockGetObjectResponse(512); + var dataSource = new StreamingDataSource(1, response); + + try + { + // Act & Assert + Assert.IsFalse(dataSource.IsComplete); + } + finally + { + dataSource.Dispose(); + } + } + + [TestMethod] + public async Task IsComplete_BecomesTrue_AfterFullRead() + { + // Arrange + var testData = MultipartDownloadTestHelpers.GenerateTestData(512, 0); + var response = CreateMockGetObjectResponse(512, testData); + var dataSource = new StreamingDataSource(1, response); + + try + { + // Act - Read all data + byte[] buffer = new byte[512]; + await dataSource.ReadAsync(buffer, 0, 512, CancellationToken.None); + + // Assert + Assert.IsTrue(dataSource.IsComplete); + } + finally + { + dataSource.Dispose(); + } + } + + [TestMethod] + public async Task IsComplete_BecomesTrue_WhenExpectedBytesReached() + { + // Arrange + var testData = MultipartDownloadTestHelpers.GenerateTestData(1000, 0); + var response = CreateMockGetObjectResponse(1000, testData); + var dataSource = new StreamingDataSource(1, response); + + try + { + // Act - Read in chunks + byte[] buffer = new byte[400]; + await dataSource.ReadAsync(buffer, 0, 400, CancellationToken.None); + await dataSource.ReadAsync(buffer, 0, 400, CancellationToken.None); + await dataSource.ReadAsync(buffer, 0, 200, CancellationToken.None); + + // Assert + Assert.IsTrue(dataSource.IsComplete); + } + finally + { + dataSource.Dispose(); + } + } + + #endregion + + #region ReadAsync Tests - Basic Functionality + + [TestMethod] + public async Task ReadAsync_ReadsDataFromResponseStream() + { + // Arrange + var testData = MultipartDownloadTestHelpers.GenerateTestData(512, 0); + var response = CreateMockGetObjectResponse(512, testData); + var dataSource = new StreamingDataSource(1, response); + + try + { + // Act + byte[] buffer = new byte[512]; + int bytesRead = await dataSource.ReadAsync(buffer, 0, 512, CancellationToken.None); + + // Assert + Assert.AreEqual(512, bytesRead); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch(testData, buffer, 0, 512)); + } + finally + 
{ + dataSource.Dispose(); + } + } + + [TestMethod] + public async Task ReadAsync_SupportsPartialReads() + { + // Arrange + var testData = MultipartDownloadTestHelpers.GenerateTestData(1000, 0); + var response = CreateMockGetObjectResponse(1000, testData); + var dataSource = new StreamingDataSource(1, response); + + try + { + // Act - Read first 300 bytes + byte[] buffer = new byte[300]; + int bytesRead = await dataSource.ReadAsync(buffer, 0, 300, CancellationToken.None); + + // Assert + Assert.AreEqual(300, bytesRead); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch(testData, buffer, 0, 300)); + Assert.IsFalse(dataSource.IsComplete); + } + finally + { + dataSource.Dispose(); + } + } + + [TestMethod] + public async Task ReadAsync_SupportsMultipleSequentialReads() + { + // Arrange + var testData = MultipartDownloadTestHelpers.GenerateTestData(1000, 0); + var response = CreateMockGetObjectResponse(1000, testData); + var dataSource = new StreamingDataSource(1, response); + + try + { + // Act - Read in chunks + byte[] buffer1 = new byte[400]; + int bytesRead1 = await dataSource.ReadAsync(buffer1, 0, 400, CancellationToken.None); + + byte[] buffer2 = new byte[400]; + int bytesRead2 = await dataSource.ReadAsync(buffer2, 0, 400, CancellationToken.None); + + byte[] buffer3 = new byte[200]; + int bytesRead3 = await dataSource.ReadAsync(buffer3, 0, 200, CancellationToken.None); + + // Assert + Assert.AreEqual(400, bytesRead1); + Assert.AreEqual(400, bytesRead2); + Assert.AreEqual(200, bytesRead3); + Assert.IsTrue(dataSource.IsComplete); + + // Verify data correctness + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch( + testData, buffer1, 0, 400)); + + byte[] expectedData2 = new byte[400]; + Array.Copy(testData, 400, expectedData2, 0, 400); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch( + expectedData2, buffer2, 0, 400)); + + byte[] expectedData3 = new byte[200]; + Array.Copy(testData, 800, expectedData3, 0, 200); + Assert.IsTrue(MultipartDownloadTestHelpers.VerifyDataMatch( + expectedData3, buffer3, 0, 200)); + } + finally + { + dataSource.Dispose(); + } + } + + [TestMethod] + public async Task ReadAsync_WithOffset_ReadsIntoBufferCorrectly() + { + // Arrange + var testData = MultipartDownloadTestHelpers.GenerateTestData(300, 0); + var response = CreateMockGetObjectResponse(300, testData); + var dataSource = new StreamingDataSource(1, response); + + try + { + // Act - Read into buffer with offset + byte[] buffer = new byte[500]; + int bytesRead = await dataSource.ReadAsync(buffer, 100, 300, CancellationToken.None); + + // Assert + Assert.AreEqual(300, bytesRead); + + // Verify data was written at correct offset + for (int i = 0; i < 300; i++) + { + Assert.AreEqual(testData[i], buffer[100 + i]); + } + } + finally + { + dataSource.Dispose(); + } + } + + #endregion + + #region ReadAsync Tests - Parameter Validation + + [TestMethod] + [ExpectedException(typeof(ArgumentNullException))] + public async Task ReadAsync_WithNullBuffer_ThrowsArgumentNullException() + { + // Arrange + var response = CreateMockGetObjectResponse(512); + var dataSource = new StreamingDataSource(1, response); + + try + { + // Act + await dataSource.ReadAsync(null, 0, 512, CancellationToken.None); + + // Assert - ExpectedException + } + finally + { + dataSource.Dispose(); + } + } + + [TestMethod] + [ExpectedException(typeof(ArgumentOutOfRangeException))] + public async Task ReadAsync_WithNegativeOffset_ThrowsArgumentOutOfRangeException() + { + // Arrange + var response = 
CreateMockGetObjectResponse(512); + var dataSource = new StreamingDataSource(1, response); + byte[] buffer = new byte[512]; + + try + { + // Act + await dataSource.ReadAsync(buffer, -1, 512, CancellationToken.None); + + // Assert - ExpectedException + } + finally + { + dataSource.Dispose(); + } + } + + [TestMethod] + [ExpectedException(typeof(ArgumentOutOfRangeException))] + public async Task ReadAsync_WithNegativeCount_ThrowsArgumentOutOfRangeException() + { + // Arrange + var response = CreateMockGetObjectResponse(512); + var dataSource = new StreamingDataSource(1, response); + byte[] buffer = new byte[512]; + + try + { + // Act + await dataSource.ReadAsync(buffer, 0, -1, CancellationToken.None); + + // Assert - ExpectedException + } + finally + { + dataSource.Dispose(); + } + } + + [TestMethod] + [ExpectedException(typeof(ArgumentException))] + public async Task ReadAsync_WithOffsetCountExceedingBounds_ThrowsArgumentException() + { + // Arrange + var response = CreateMockGetObjectResponse(512); + var dataSource = new StreamingDataSource(1, response); + byte[] buffer = new byte[512]; + + try + { + // Act - offset + count exceeds buffer length + await dataSource.ReadAsync(buffer, 400, 200, CancellationToken.None); + + // Assert - ExpectedException + } + finally + { + dataSource.Dispose(); + } + } + + #endregion + + #region ReadAsync Tests - Completion Detection + + [TestMethod] + public async Task ReadAsync_ReturnsZero_WhenStreamExhausted() + { + // Arrange + var testData = MultipartDownloadTestHelpers.GenerateTestData(100, 0); + var response = CreateMockGetObjectResponse(100, testData); + var dataSource = new StreamingDataSource(1, response); + + try + { + // Act - Read all data + byte[] buffer1 = new byte[100]; + int bytesRead1 = await dataSource.ReadAsync(buffer1, 0, 100, CancellationToken.None); + + // Try to read more + byte[] buffer2 = new byte[100]; + int bytesRead2 = await dataSource.ReadAsync(buffer2, 0, 100, CancellationToken.None); + + // Assert + Assert.AreEqual(100, bytesRead1); + Assert.AreEqual(0, bytesRead2); + Assert.IsTrue(dataSource.IsComplete); + } + finally + { + dataSource.Dispose(); + } + } + + [TestMethod] + public async Task ReadAsync_AfterComplete_ReturnsZero() + { + // Arrange + var testData = MultipartDownloadTestHelpers.GenerateTestData(512, 0); + var response = CreateMockGetObjectResponse(512, testData); + var dataSource = new StreamingDataSource(1, response); + + try + { + // Act - Read all data to completion + byte[] buffer1 = new byte[512]; + await dataSource.ReadAsync(buffer1, 0, 512, CancellationToken.None); + + Assert.IsTrue(dataSource.IsComplete); + + // Try to read again after completion + byte[] buffer2 = new byte[100]; + int bytesRead = await dataSource.ReadAsync(buffer2, 0, 100, CancellationToken.None); + + // Assert + Assert.AreEqual(0, bytesRead); + } + finally + { + dataSource.Dispose(); + } + } + + [TestMethod] + public async Task ReadAsync_MarksComplete_WhenExpectedBytesReached() + { + // Arrange - Create response with specific ContentLength + var testData = MultipartDownloadTestHelpers.GenerateTestData(1000, 0); + var response = CreateMockGetObjectResponse(1000, testData); + var dataSource = new StreamingDataSource(1, response); + + try + { + // Act - Read exactly expected bytes + byte[] buffer = new byte[1000]; + int bytesRead = await dataSource.ReadAsync(buffer, 0, 1000, CancellationToken.None); + + // Assert + Assert.AreEqual(1000, bytesRead); + Assert.IsTrue(dataSource.IsComplete); + } + finally + { + dataSource.Dispose(); + } + } + + 
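+ // Illustrative sketch of the completion rule these tests exercise (assumed shape, not the SDK's verbatim source): + // _totalBytesRead += bytesRead; + // if (bytesRead == 0 || _totalBytesRead >= _response.ContentLength) IsComplete = true; + // i.e. a part is complete at end-of-stream or once ContentLength bytes have been read. +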
#endregion + + #region ReadAsync Tests - Progress Tracking + + [TestMethod] + public async Task ReadAsync_TracksProgressCorrectly() + { + // Arrange + var testData = MultipartDownloadTestHelpers.GenerateTestData(1000, 0); + var response = CreateMockGetObjectResponse(1000, testData); + var dataSource = new StreamingDataSource(1, response); + + try + { + // Act & Assert - Track progress through multiple reads + Assert.IsFalse(dataSource.IsComplete); + + byte[] buffer = new byte[300]; + await dataSource.ReadAsync(buffer, 0, 300, CancellationToken.None); + Assert.IsFalse(dataSource.IsComplete); // 300/1000 + + await dataSource.ReadAsync(buffer, 0, 300, CancellationToken.None); + Assert.IsFalse(dataSource.IsComplete); // 600/1000 + + await dataSource.ReadAsync(buffer, 0, 300, CancellationToken.None); + Assert.IsFalse(dataSource.IsComplete); // 900/1000 + + await dataSource.ReadAsync(buffer, 0, 100, CancellationToken.None); + Assert.IsTrue(dataSource.IsComplete); // 1000/1000 + } + finally + { + dataSource.Dispose(); + } + } + + #endregion + + #region ReadAsync Tests - Error Handling + + [TestMethod] + public async Task ReadAsync_OnStreamError_MarksComplete() + { + // Arrange - Create a response with a stream that throws + var errorStream = new FaultyStream(new IOException("Stream read error")); + var response = new GetObjectResponse + { + ContentLength = 512, + ResponseStream = errorStream + }; + var dataSource = new StreamingDataSource(1, response); + + try + { + // Act & Assert + byte[] buffer = new byte[512]; + await Assert.ThrowsExceptionAsync<IOException>(async () => + { + await dataSource.ReadAsync(buffer, 0, 512, CancellationToken.None); + }); + + // Should mark as complete on error + Assert.IsTrue(dataSource.IsComplete); + } + finally + { + dataSource.Dispose(); + } + } + + [TestMethod] + public async Task ReadAsync_PropagatesStreamExceptions() + { + // Arrange + var errorStream = new FaultyStream(new InvalidOperationException("Test error")); + var response = new GetObjectResponse + { + ContentLength = 512, + ResponseStream = errorStream + }; + var dataSource = new StreamingDataSource(1, response); + + try + { + // Act & Assert + byte[] buffer = new byte[512]; + await Assert.ThrowsExceptionAsync<InvalidOperationException>(async () => + { + await dataSource.ReadAsync(buffer, 0, 512, CancellationToken.None); + }); + } + finally + { + dataSource.Dispose(); + } + } + + #endregion + + #region Disposal Tests + + [TestMethod] + public void Dispose_ReleasesResponse() + { + // Arrange + var response = CreateMockGetObjectResponse(512); + var dataSource = new StreamingDataSource(1, response); + + // Act + dataSource.Dispose(); + + // Assert - Response stream should be disposed + // After disposal, stream is either null or no longer readable + Assert.IsTrue(response.ResponseStream == null || !response.ResponseStream.CanRead); + } + + [TestMethod] + public void Dispose_MultipleCalls_IsIdempotent() + { + // Arrange + var response = CreateMockGetObjectResponse(512); + var dataSource = new StreamingDataSource(1, response); + + // Act - Dispose multiple times + dataSource.Dispose(); + dataSource.Dispose(); + dataSource.Dispose(); + + // Assert - Should not throw + } + + [TestMethod] + [ExpectedException(typeof(ObjectDisposedException))] + public async Task ReadAsync_AfterDispose_ThrowsObjectDisposedException() + { + // Arrange + var response = CreateMockGetObjectResponse(512); + var dataSource = new StreamingDataSource(1, response); + dataSource.Dispose(); + + // Act + byte[] buffer = new byte[512]; + await dataSource.ReadAsync(buffer, 0,
512, CancellationToken.None); + + // Assert - ExpectedException + } + + [TestMethod] + [ExpectedException(typeof(ObjectDisposedException))] + public void PartNumber_AfterDispose_ThrowsObjectDisposedException() + { + // Arrange + var response = CreateMockGetObjectResponse(512); + var dataSource = new StreamingDataSource(1, response); + dataSource.Dispose(); + + // Act + var partNumber = dataSource.PartNumber; + + // Assert - ExpectedException + } + + [TestMethod] + [ExpectedException(typeof(ObjectDisposedException))] + public void IsComplete_AfterDispose_ThrowsObjectDisposedException() + { + // Arrange + var response = CreateMockGetObjectResponse(512); + var dataSource = new StreamingDataSource(1, response); + dataSource.Dispose(); + + // Act + var isComplete = dataSource.IsComplete; + + // Assert - ExpectedException + } + + #endregion + + #region Helper Methods + + /// <summary> + /// Creates a mock GetObjectResponse with test data. + /// </summary> + private GetObjectResponse CreateMockGetObjectResponse(long contentLength, byte[] testData = null) + { + if (testData == null) + { + testData = MultipartDownloadTestHelpers.GenerateTestData((int)contentLength, 0); + } + + return new GetObjectResponse + { + ContentLength = contentLength, + ResponseStream = new MemoryStream(testData), + ETag = "test-etag" + }; + } + + /// <summary> + /// Stream that throws exceptions for testing error handling. + /// </summary> + private class FaultyStream : Stream + { + private readonly Exception _exception; + + public FaultyStream(Exception exception) + { + _exception = exception; + } + + public override bool CanRead => true; + public override bool CanSeek => false; + public override bool CanWrite => false; + public override long Length => throw new NotSupportedException(); + public override long Position + { + get => throw new NotSupportedException(); + set => throw new NotSupportedException(); + } + + public override void Flush() { } + + public override int Read(byte[] buffer, int offset, int count) + { + throw _exception; + } + + public override Task<int> ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) + { + throw _exception; + } + + public override long Seek(long offset, SeekOrigin origin) + { + throw new NotSupportedException(); + } + + public override void SetLength(long value) + { + throw new NotSupportedException(); + } + + public override void Write(byte[] buffer, int offset, int count) + { + throw new NotSupportedException(); + } + } + + #endregion + } +} diff --git a/sdk/test/Services/S3/UnitTests/Custom/UploadDirectoryCommandTests.cs b/sdk/test/Services/S3/UnitTests/Custom/UploadDirectoryCommandTests.cs new file mode 100644 index 000000000000..f2e30d440455 --- /dev/null +++ b/sdk/test/Services/S3/UnitTests/Custom/UploadDirectoryCommandTests.cs @@ -0,0 +1,227 @@ +using Amazon.S3; +using Amazon.S3.Model; +using Amazon.S3.Transfer; +using Amazon.S3.Transfer.Internal; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Moq; +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; +using AWSSDK_DotNet.IntegrationTests.Utils; + +namespace AWSSDK.UnitTests +{ + [TestClass] + public class UploadDirectoryCommandTests + { + private string _testDirectory; + private Mock<IAmazonS3> _mockS3Client; + private TransferUtilityConfig _config; + + [TestInitialize] + public void Setup() + { + _testDirectory = Path.Combine(Path.GetTempPath(), "UploadDirectoryCommandTests_" + Guid.NewGuid().ToString("N").Substring(0, 8));
+            Directory.CreateDirectory(_testDirectory);
+
+            // Create some test files
+            File.WriteAllBytes(Path.Combine(_testDirectory, "file1.dat"), GenerateTestData(1024));
+            File.WriteAllBytes(Path.Combine(_testDirectory, "file2.dat"), GenerateTestData(1024));
+            File.WriteAllBytes(Path.Combine(_testDirectory, "file3.dat"), GenerateTestData(1024));
+            File.WriteAllBytes(Path.Combine(_testDirectory, "file4.dat"), GenerateTestData(1024));
+            File.WriteAllBytes(Path.Combine(_testDirectory, "file5.dat"), GenerateTestData(1024));
+
+            _mockS3Client = new Mock<IAmazonS3>();
+            _config = new TransferUtilityConfig
+            {
+                ConcurrentServiceRequests = 4
+            };
+
+            var s3Config = new AmazonS3Config
+            {
+                BufferSize = 8192,
+            };
+            _mockS3Client.Setup(c => c.Config).Returns(s3Config);
+        }
+
+        [TestCleanup]
+        public void Cleanup()
+        {
+            if (Directory.Exists(_testDirectory))
+            {
+                try
+                {
+                    Directory.Delete(_testDirectory, true);
+                }
+                catch
+                {
+                    // Ignore cleanup errors in tests
+                }
+            }
+        }
+
+        #region Concurrency Control Tests
+
+        /// <summary>
+        /// Tests that the ConcurrentServiceRequests setting actually limits concurrent file uploads.
+        /// Expected: Max 2 concurrent uploads (ConcurrentServiceRequests = 2)
+        /// </summary>
+        [TestMethod]
+        public async Task ExecuteAsync_ConcurrentServiceRequests_RespectsLimit()
+        {
+            // Arrange
+            var request = CreateUploadDirectoryRequest();
+            request.UploadFilesConcurrently = true;
+
+            var config = new TransferUtilityConfig
+            {
+                ConcurrentServiceRequests = 2
+            };
+
+            var currentConcurrentUploads = 0;
+            var maxObservedConcurrency = 0;
+            var concurrencyLock = new object();
+
+            // Mock PutObjectAsync to track concurrency
+            _mockS3Client.Setup(c => c.PutObjectAsync(
+                It.IsAny<PutObjectRequest>(),
+                It.IsAny<CancellationToken>()))
+                .Returns(async (PutObjectRequest req, CancellationToken ct) =>
+                {
+                    lock (concurrencyLock)
+                    {
+                        currentConcurrentUploads++;
+                        maxObservedConcurrency = Math.Max(maxObservedConcurrency, currentConcurrentUploads);
+                    }
+
+                    try
+                    {
+                        await Task.Delay(100, ct);
+                        return new PutObjectResponse
+                        {
+                            ETag = "\"test-etag\"",
+                            HttpStatusCode = System.Net.HttpStatusCode.OK,
+                        };
+                    }
+                    finally
+                    {
+                        lock (concurrencyLock)
+                        {
+                            currentConcurrentUploads--;
+                        }
+                    }
+                });
+
+            var utility = new TransferUtility(_mockS3Client.Object, config);
+            var command = new UploadDirectoryCommand(utility, config, request);
+
+            // Act
+            await command.ExecuteAsync(CancellationToken.None);
+
+            // Assert
+            Assert.AreEqual(2, config.ConcurrentServiceRequests, "Test setup verification");
+            Assert.IsTrue(maxObservedConcurrency <= config.ConcurrentServiceRequests,
+                $"Max concurrent uploads ({maxObservedConcurrency}) should not exceed ConcurrentServiceRequests ({config.ConcurrentServiceRequests})");
+        }
+
+        /// <summary>
+        /// Tests that sequential mode (UploadFilesConcurrently = false) uploads only one file at a time.
+        /// Expected: Max 1 concurrent upload (sequential mode)
+        /// </summary>
+        [TestMethod]
+        public async Task ExecuteAsync_SequentialMode_UploadsOneAtATime()
+        {
+            // Arrange
+            var request = CreateUploadDirectoryRequest();
+            request.UploadFilesConcurrently = false;
+
+            var config = new TransferUtilityConfig
+            {
+                ConcurrentServiceRequests = 10
+            };
+
+            var currentConcurrentUploads = 0;
+            var maxObservedConcurrency = 0;
+            var concurrencyLock = new object();
+
+            // Mock PutObjectAsync to track concurrency
+            _mockS3Client.Setup(c => c.PutObjectAsync(
+                It.IsAny<PutObjectRequest>(),
+                It.IsAny<CancellationToken>()))
+                .Returns(async (PutObjectRequest req, CancellationToken ct) =>
+                {
+                    lock (concurrencyLock)
+                    {
+                        currentConcurrentUploads++;
+                        maxObservedConcurrency = Math.Max(maxObservedConcurrency, currentConcurrentUploads);
+                    }
+
+                    try
+                    {
+                        await Task.Delay(50, ct);
+                        return new PutObjectResponse
+                        {
+                            ETag = "\"test-etag\"",
+                            HttpStatusCode = System.Net.HttpStatusCode.OK,
+                        };
+                    }
+                    finally
+                    {
+                        lock (concurrencyLock)
+                        {
+                            currentConcurrentUploads--;
+                        }
+                    }
+                });
+
+            var utility = new TransferUtility(_mockS3Client.Object, config);
+            var command = new UploadDirectoryCommand(utility, config, request);
+
+            // Act
+            await command.ExecuteAsync(CancellationToken.None);
+
+            // Assert
+            Assert.AreEqual(1, maxObservedConcurrency,
+                $"Sequential mode should only upload 1 file at a time, but observed {maxObservedConcurrency}");
+        }
+
+        #endregion
+
+        #region Helper Methods
+
+        private TransferUtilityUploadDirectoryRequest CreateUploadDirectoryRequest(
+            string bucketName = "test-bucket",
+            string s3Directory = "prefix",
+            string localDirectory = null)
+        {
+            localDirectory = localDirectory ?? _testDirectory;
+
+            return new TransferUtilityUploadDirectoryRequest
+            {
+                BucketName = bucketName,
+                KeyPrefix = s3Directory,
+                Directory = localDirectory
+            };
+        }
+
+        private byte[] GenerateTestData(int size)
+        {
+            var data = new byte[size];
+            var random = new Random(42); // Fixed seed for reproducible tests
+            random.NextBytes(data);
+            return data;
+        }
+
+        #endregion
+    }
+}
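Note on the AfterDispose tests in StreamingDataSourceTests: they imply that every public member of StreamingDataSource checks a disposed flag before doing any work, and that Dispose itself is idempotent. The implementation is not part of this diff; the following is a minimal sketch of that guard pattern under those assumptions, with all names (DisposableSource, ThrowIfDisposed, _disposed) purely illustrative:

    using System;
    using System.IO;

    internal sealed class DisposableSource : IDisposable
    {
        private readonly Stream _stream;
        private bool _disposed;

        public DisposableSource(Stream stream)
        {
            _stream = stream ?? throw new ArgumentNullException(nameof(stream));
        }

        // Every public member checks the flag first, so callers get
        // ObjectDisposedException rather than undefined behavior.
        public bool IsComplete
        {
            get
            {
                ThrowIfDisposed();
                return _stream.Position >= _stream.Length;
            }
        }

        public void Dispose()
        {
            if (_disposed) return; // second and later calls are no-ops (idempotent)
            _disposed = true;
            _stream.Dispose();
        }

        private void ThrowIfDisposed()
        {
            if (_disposed)
                throw new ObjectDisposedException(GetType().FullName);
        }
    }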
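Both UploadDirectoryCommand tests observe peak parallelism with the same counter-under-lock technique: increment inside a lock, record the running maximum, decrement in a finally block. Distilled out of the tests into a self-contained sketch; ConcurrencyTracker is a hypothetical helper for illustration, not a type in this PR:

    using System;
    using System.Linq;
    using System.Threading.Tasks;

    internal sealed class ConcurrencyTracker
    {
        private readonly object _lock = new object();
        private int _current;

        public int MaxObserved { get; private set; }

        // Wraps an async operation so the number of in-flight invocations is recorded.
        public async Task<T> TrackAsync<T>(Func<Task<T>> operation)
        {
            lock (_lock)
            {
                _current++;
                MaxObserved = Math.Max(MaxObserved, _current);
            }
            try
            {
                return await operation();
            }
            finally
            {
                lock (_lock)
                {
                    _current--; // always runs, even when the operation throws
                }
            }
        }
    }

    internal static class ConcurrencyTrackerDemo
    {
        public static async Task Main()
        {
            var tracker = new ConcurrencyTracker();
            // Launch 8 simulated uploads; with no throttling all 8 may overlap.
            var tasks = Enumerable.Range(0, 8).Select(_ =>
                tracker.TrackAsync(async () =>
                {
                    await Task.Delay(50);
                    return true;
                }));
            await Task.WhenAll(tasks);
            Console.WriteLine($"Max observed concurrency: {tracker.MaxObserved}");
        }
    }

Asserting on MaxObserved after the run is what lets the tests distinguish "limit respected" (max <= ConcurrentServiceRequests) from "strictly sequential" (max == 1) without depending on scheduling order.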